From: Colin LeMahieu Date: Wed, 28 Jan 2015 16:58:05 +0000 (+0000) Subject: [Hexagon] Replacing old intrinsic tests with organized versions that match the refere... X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=0f5f7c0c1b5e2b3e9096135f9a2054049dedd198;p=oota-llvm.git [Hexagon] Replacing old intrinsic tests with organized versions that match the reference manual. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@227321 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/test/CodeGen/Hexagon/intrinsics-alu32-2.ll b/test/CodeGen/Hexagon/intrinsics-alu32-2.ll deleted file mode 100644 index 719c92584b6..00000000000 --- a/test/CodeGen/Hexagon/intrinsics-alu32-2.ll +++ /dev/null @@ -1,178 +0,0 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s - -; Verify that ALU32 - add, or, and, sub, combine intrinsics -; are lowered to the right instructions. - -@e = external global i1 -@b = external global i8 -@d = external global i32 -@c = external global i64 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test1(i32 %a, i32 %b) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b) - store i32 %0, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sub(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test2(i32 %a, i32 %b) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b) - store i32 %0, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}and(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test3(i32 %a, i32 %b) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b) - store i32 %0, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}or(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test4(i32 %a, i32 %b) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b) - store i32 %0, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}xor(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test5(i32 %a, i32 %b) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b) - store i32 %0, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}combine(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test6(i32 %a, i32 %b) #0 { -entry: - %0 = tail call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b) - store i64 %0, i64* @c, align 8 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}#-31849) - -define void @test7(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.addi(i32 %a, i32 -31849) - store i32 %0, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}and(r{{[0-9]+}}{{ *}},{{ *}}#-512) - -define void @test8(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.andir(i32 %a, i32 -512) - store i32 %0, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}or(r{{[0-9]+}}{{ *}},{{ *}}#511) - -define void @test9(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.orir(i32 %a, i32 511) - store i32 %0, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sub(#508{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test10(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.subri(i32 508, i32 %a) - store i32 %0, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}.l{{ *}}={{ *}}#48242 - -define void @test11() #0 { -entry: - %0 = load i32* @d, align 4 - %1 = tail call i32 @llvm.hexagon.A2.tfril(i32 %0, i32 48242) - store i32 %1, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}.h{{ *}}={{ *}}#50826 
- -define void @test12() #0 { -entry: - %0 = load i32* @d, align 4 - %1 = tail call i32 @llvm.hexagon.A2.tfrih(i32 %0, i32 50826) - store i32 %1, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mux(p{{[0-3]+}}{{ *}},{{ *}}r{{[0-9]+}}{{ *}},{{ *}}##71230) - -define void @test21(i32 %a) #0 { -entry: - %0 = load i8* @b, align 1 - %conv = zext i8 %0 to i32 - %1 = tail call i32 @llvm.hexagon.C2.muxir(i32 %conv, i32 %a, i32 71230) - store i32 %1, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mux(p{{[0-3]+}}{{ *}},{{ *}}##5000{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test23(i32 %a) #0 { -entry: - %0 = load i8* @b, align 1 - %conv = zext i8 %0 to i32 - %1 = tail call i32 @llvm.hexagon.C2.muxri(i32 %conv, i32 5000, i32 %a) - store i32 %1, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mux(p{{[0-3]+}}{{ *}},{{ *}}##-4900{{ *}},{{ *}}#94) - -define void @test24(i32 %a) #0 { -entry: - %0 = load i8* @b, align 1 - %conv = zext i8 %0 to i32 - %1 = tail call i32 @llvm.hexagon.C2.muxii(i32 %conv, i32 -4900, i32 94) - store i32 %1, i32* @d, align 4 - ret void -} - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}combine(##-1280{{ *}},{{ *}}#120) - -define void @test25(i32 %a) #0 { -entry: - %0 = tail call i64 @llvm.hexagon.A2.combineii(i32 -1280, i32 120) - store i64 %0, i64* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.A2.add(i32, i32) #1 -declare i32 @llvm.hexagon.A2.sub(i32, i32) #1 -declare i32 @llvm.hexagon.A2.and(i32, i32) #1 -declare i32 @llvm.hexagon.A2.or(i32, i32) #1 -declare i32 @llvm.hexagon.A2.xor(i32, i32) #1 -declare i64 @llvm.hexagon.A2.combinew(i32, i32) #1 -declare i32 @llvm.hexagon.A2.addi(i32, i32) #1 -declare i32 @llvm.hexagon.A2.andir(i32, i32) #1 -declare i32 @llvm.hexagon.A2.orir(i32, i32) #1 -declare i32 @llvm.hexagon.A2.subri(i32, i32) -declare i32 @llvm.hexagon.A2.tfril(i32, i32) #1 -declare i32 @llvm.hexagon.A2.tfrih(i32, i32) #1 -declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32) #1 -declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32) #1 -declare i32 @llvm.hexagon.C2.muxii(i32, i32, i32) #1 -declare i64 @llvm.hexagon.A2.combineii(i32, i32) #1 diff --git a/test/CodeGen/Hexagon/intrinsics-alu32.ll b/test/CodeGen/Hexagon/intrinsics-alu32.ll deleted file mode 100644 index 2218e26275e..00000000000 --- a/test/CodeGen/Hexagon/intrinsics-alu32.ll +++ /dev/null @@ -1,83 +0,0 @@ -; RUN: llc -march=hexagon < %s | FileCheck %s - -; Verify that ALU32 - aslh, asrh, sxth, sxtb, zxth, zxtb intrinsics -; are lowered to the right instructions. 
- -@c = external global i64 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}aslh({{ *}}r{{[0-9]+}}{{ *}}) -define void @test1(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.aslh(i32 %a) - %conv = sext i32 %0 to i64 - store i64 %conv, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.A2.aslh(i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}asrh({{ *}}r{{[0-9]+}}{{ *}}) -define void @test2(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.asrh(i32 %a) - %conv = sext i32 %0 to i64 - store i64 %conv, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.A2.asrh(i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sxtb({{ *}}r{{[0-9]+}}{{ *}}) -define void @test3(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.sxtb(i32 %a) - %conv = sext i32 %0 to i64 - store i64 %conv, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.A2.sxtb(i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}sxth({{ *}}r{{[0-9]+}}{{ *}}) -define void @test4(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.sxth(i32 %a) - %conv = sext i32 %0 to i64 - store i64 %conv, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.A2.sxth(i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}zxtb({{ *}}r{{[0-9]+}}{{ *}}) -define void @test6(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.zxtb(i32 %a) - %conv = sext i32 %0 to i64 - store i64 %conv, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.A2.zxtb(i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}zxth({{ *}}r{{[0-9]+}}{{ *}}) -define void @test7(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.zxth(i32 %a) - %conv = sext i32 %0 to i64 - store i64 %conv, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.A2.zxth(i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}asrh({{ *}}r{{[0-9]+}}{{ *}}) -define void @test8(i32 %a) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32 %a) - %conv = sext i32 %0 to i64 - store i64 %conv, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.SI.to.SXTHI.asrh(i32) #1 diff --git a/test/CodeGen/Hexagon/intrinsics-alu32_3op.ll b/test/CodeGen/Hexagon/intrinsics-alu32_3op.ll deleted file mode 100644 index b2bf439e1f7..00000000000 --- a/test/CodeGen/Hexagon/intrinsics-alu32_3op.ll +++ /dev/null @@ -1,34 +0,0 @@ -; RUN: llc < %s | FileCheck %s - -target datalayout = "e-m:e-p:32:32-i1:32-i64:64-a:0-v32:32-n16:32" -target triple = "hexagon" - -; CHECK: test13: -; CHECK: r{{[0-9]+}} = add(r{{[0-9]+}}, r{{[0-9]+}}):sat -define i32 @test13(i32 %Rs, i32 %Rt) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.addsat(i32 %Rs, i32 %Rt) - ret i32 %0 -} - - -; CHECK: test14: -; CHECK: r{{[0-9]+}} = sub(r{{[0-9]+}}, r{{[0-9]+}}):sat -define i32 @test14(i32 %Rs, i32 %Rt) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.A2.subsat(i32 %Rs, i32 %Rt) - ret i32 %0 -} - - -; CHECK: test61: -; CHECK: r{{[0-9]+:[0-9]+}} = packhl(r{{[0-9]+}}, r{{[0-9]+}}) -define i64 @test61(i32 %Rs, i32 %Rt) #0 { -entry: - %0 = tail call i64 @llvm.hexagon.S2.packhl(i32 %Rs, i32 %Rt) - ret i64 %0 -} - -declare i32 @llvm.hexagon.A2.addsat(i32, i32) #1 -declare i32 @llvm.hexagon.A2.subsat(i32, i32) #1 -declare i64 @llvm.hexagon.S2.packhl(i32, i32) #1 diff --git a/test/CodeGen/Hexagon/intrinsics-mpy-acc.ll b/test/CodeGen/Hexagon/intrinsics-mpy-acc.ll deleted file mode 100644 index a1639aabf13..00000000000 --- a/test/CodeGen/Hexagon/intrinsics-mpy-acc.ll +++ /dev/null @@ -1,120 +0,0 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s - -; Verify that the mpy intrinsics with add/subtract are being lowered to 
the right instruction. - -@c = external global i64 - -; CHECK: r{{[0-9]+}}{{ *}}+{{ *}}={{ *}}mpyi(r{{[0-9]+}}{{ *}},{{ *}}#124) - -define void @test1(i32 %a) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %1 = tail call i32 @llvm.hexagon.M2.macsip(i32 %conv, i32 %a, i32 124) - %conv1 = sext i32 %1 to i64 - store i64 %conv1, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.macsip(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-{{ *}}={{ *}}mpyi(r{{[0-9]+}}{{ *}},{{ *}}#166) - -define void @test2(i32 %a) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %1 = tail call i32 @llvm.hexagon.M2.macsin(i32 %conv, i32 %a, i32 166) - %conv1 = sext i32 %1 to i64 - store i64 %conv1, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.macsin(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+{{ *}}={{ *}}mpyi(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test3(i32 %a, i32 %b) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %1 = tail call i32 @llvm.hexagon.M2.maci(i32 %conv, i32 %a, i32 %b) - %conv1 = sext i32 %1 to i64 - store i64 %conv1, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.maci(i32, i32, i32) #1 - -@d = external global i32 - -; CHECK: r{{[0-9]+}}{{ *}}+{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}#40) - -define void @test7(i32 %a) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %1 = tail call i32 @llvm.hexagon.M2.accii(i32 %conv, i32 %a, i32 40) - %conv1 = sext i32 %1 to i64 - store i64 %conv1, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.accii(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}#100) - -define void @test8(i32 %a) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %1 = tail call i32 @llvm.hexagon.M2.naccii(i32 %conv, i32 %a, i32 100) - %conv1 = sext i32 %1 to i64 - store i64 %conv1, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.naccii(i32, i32, i32) #1 - - -; CHECK: r{{[0-9]+}}{{ *}}+{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test9(i32 %a, i32 %b) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %1 = tail call i32 @llvm.hexagon.M2.acci(i32 %conv, i32 %a, i32 %b) - %conv1 = sext i32 %1 to i64 - store i64 %conv1, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.acci(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+{{ *}}={{ *}}sub(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test10(i32 %a, i32 %b) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %1 = tail call i32 @llvm.hexagon.M2.subacc(i32 %conv, i32 %a, i32 %b) - %conv1 = sext i32 %1 to i64 - store i64 %conv1, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.subacc(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-{{ *}}={{ *}}add(r{{[0-9]+}}{{ *}},{{ *}}r{{[0-9]+}}) - -define void @test11(i32 %a, i32 %b) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %1 = tail call i32 @llvm.hexagon.M2.nacci(i32 %conv, i32 %a, i32 %b) - %conv1 = sext i32 %1 to i64 - store i64 %conv1, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.nacci(i32, i32, i32) #1 diff --git a/test/CodeGen/Hexagon/intrinsics-mpy.ll b/test/CodeGen/Hexagon/intrinsics-mpy.ll deleted file mode 100644 index 6b7eddf7cce..00000000000 --- a/test/CodeGen/Hexagon/intrinsics-mpy.ll +++ /dev/null @@ -1,445 +0,0 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s - -; Verify that 
the mpy intrinsics are lowered into the right instructions. - -@c = external global i32 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test1(i32 %a1, i32 %b1) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.ll.s0(i32 %a1, i32 %b1) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.ll.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test2(i32 %a2, i32 %b2) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.lh.s0(i32 %a2, i32 %b2) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.lh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test3(i32 %a3, i32 %b3) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.hl.s0(i32 %a3, i32 %b3) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.hl.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test4(i32 %a4, i32 %b4) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.hh.s0(i32 %a4, i32 %b4) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.hh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):sat - -define void @test5(i32 %a5, i32 %b5) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32 %a5, i32 %b5) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.ll.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):sat - -define void @test6(i32 %a6, i32 %b6) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32 %a6, i32 %b6) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.lh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):sat - -define void @test7(i32 %a7, i32 %b7) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32 %a7, i32 %b7) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.hl.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):sat - -define void @test8(i32 %a8, i32 %b8) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32 %a8, i32 %b8) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.hh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):rnd - -define void @test9(i32 %a9, i32 %b9) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.rnd.ll.s0(i32 %a9, i32 %b9) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.rnd.ll.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):rnd - -define void @test10(i32 %a10, i32 %b10) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.rnd.lh.s0(i32 %a10, i32 %b10) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.rnd.lh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):rnd - -define void @test11(i32 %a11, i32 %b11) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.rnd.hl.s0(i32 %a11, i32 %b11) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.rnd.hl.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ 
*}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):rnd - -define void @test12(i32 %a12, i32 %b12) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.rnd.hh.s0(i32 %a12, i32 %b12) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.rnd.hh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):rnd:sat - -define void @test13(i32 %a13, i32 %b13) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32 %a13, i32 %b13) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):rnd:sat - -define void @test14(i32 %a14, i32 %b14) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32 %a14, i32 %b14) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):rnd:sat - -define void @test15(i32 %a15, i32 %b15) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32 %a15, i32 %b15) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):rnd:sat - -define void @test16(i32 %a16, i32 %b16) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32 %a16, i32 %b16) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test17(i32 %a17, i32 %b17) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpyu.ll.s0(i32 %a17, i32 %b17) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.ll.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test18(i32 %a18, i32 %b18) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpyu.lh.s0(i32 %a18, i32 %b18) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.lh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test19(i32 %a19, i32 %b19) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpyu.hl.s0(i32 %a19, i32 %b19) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.hl.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test20(i32 %a20, i32 %b20) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpyu.hh.s0(i32 %a20, i32 %b20) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.hh.s0(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test21(i32 %a21, i32 %b21) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.ll.s1(i32 %a21, i32 %b21) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.ll.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test22(i32 %a22, i32 %b22) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.lh.s1(i32 %a22, i32 %b22) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.lh.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ 
*}}r{{[0-9]+}}.l):<<1 - -define void @test23(i32 %a23, i32 %b23) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.hl.s1(i32 %a23, i32 %b23) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.hl.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test24(i32 %a24, i32 %b24) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.hh.s1(i32 %a24, i32 %b24) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.hh.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:sat - -define void @test25(i32 %a25, i32 %b25) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32 %a25, i32 %b25) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.ll.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:sat - -define void @test26(i32 %a26, i32 %b26) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32 %a26, i32 %b26) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.lh.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:sat - -define void @test27(i32 %a27, i32 %b27) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32 %a27, i32 %b27) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.hl.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:sat - -define void @test28(i32 %a28, i32 %b28) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32 %a28, i32 %b28) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.hh.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:rnd - -define void @test29(i32 %a29, i32 %b29) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.rnd.ll.s1(i32 %a29, i32 %b29) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.rnd.ll.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:rnd - -define void @test30(i32 %a30, i32 %b30) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.rnd.lh.s1(i32 %a30, i32 %b30) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.rnd.lh.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:rnd - -define void @test31(i32 %a31, i32 %b31) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.rnd.hl.s1(i32 %a31, i32 %b31) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.rnd.hl.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:rnd - -define void @test32(i32 %a32, i32 %b32) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.rnd.hh.s1(i32 %a32, i32 %b32) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.rnd.hh.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:rnd:sat - -define void @test33(i32 %a33, i32 %b33) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32 %a33, i32 %b33) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.rnd.ll.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ 
*}}r{{[0-9]+}}.h):<<1:rnd:sat - -define void @test34(i32 %a34, i32 %b34) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32 %a34, i32 %b34) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.rnd.lh.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:rnd:sat - -define void @test35(i32 %a35, i32 %b35) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32 %a35, i32 %b35) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hl.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:rnd:sat - -define void @test36(i32 %a36, i32 %b36) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32 %a36, i32 %b36) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.sat.rnd.hh.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test37(i32 %a37, i32 %b37) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpyu.ll.s1(i32 %a37, i32 %b37) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.ll.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test38(i32 %a38, i32 %b38) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpyu.lh.s1(i32 %a38, i32 %b38) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.lh.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test39(i32 %a39, i32 %b39) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpyu.hl.s1(i32 %a39, i32 %b39) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.hl.s1(i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test40(i32 %a40, i32 %b40) #0 { -entry: - %0 = tail call i32 @llvm.hexagon.M2.mpyu.hh.s1(i32 %a40, i32 %b40) - store i32 %0, i32* @c, align 4 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.hh.s1(i32, i32) #1 diff --git a/test/CodeGen/Hexagon/intrinsics-mpy2.ll b/test/CodeGen/Hexagon/intrinsics-mpy2.ll deleted file mode 100644 index b148e5b64e2..00000000000 --- a/test/CodeGen/Hexagon/intrinsics-mpy2.ll +++ /dev/null @@ -1,773 +0,0 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s - -; Verify that the mpy intrinsics with add/subtract are being lowered to the right instruction. 
- -@c = external global i64 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test1(i64 %a1, i64 %b1) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a1 to i32 - %conv2 = trunc i64 %b1 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.ll.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test2(i64 %a2, i64 %b2) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a2 to i32 - %conv2 = trunc i64 %b2 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.lh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test3(i64 %a3, i64 %b3) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a3 to i32 - %conv2 = trunc i64 %b3 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.hl.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test4(i64 %a4, i64 %b4) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a4 to i32 - %conv2 = trunc i64 %b4 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.hh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):sat - -define void @test5(i64 %a5, i64 %b5) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a5 to i32 - %conv2 = trunc i64 %b5 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):sat - -define void @test6(i64 %a6, i64 %b6) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a6 to i32 - %conv2 = trunc i64 %b6 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):sat - -define void @test7(i64 %a7, i64 %b7) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a7 to i32 - %conv2 = trunc i64 %b7 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ 
*}},{{ *}}r{{[0-9]+}}.h):sat - -define void @test8(i64 %a8, i64 %b8) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a8 to i32 - %conv2 = trunc i64 %b8 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test9(i64 %a9, i64 %b9) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a9 to i32 - %conv2 = trunc i64 %b9 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.ll.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test10(i64 %a10, i64 %b10) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a10 to i32 - %conv2 = trunc i64 %b10 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.lh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test11(i64 %a11, i64 %b11) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a11 to i32 - %conv2 = trunc i64 %b11 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.hl.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test12(i64 %a12, i64 %b12) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a12 to i32 - %conv2 = trunc i64 %b12 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.hh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):sat - -define void @test13(i64 %a13, i64 %b13) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a13 to i32 - %conv2 = trunc i64 %b13 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):sat - -define void @test14(i64 %a14, i64 %b14) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a14 to i32 - %conv2 = trunc i64 %b14 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):sat - -define void @test15(i64 %a15, 
i64 %b15) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a15 to i32 - %conv2 = trunc i64 %b15 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):sat - -define void @test16(i64 %a16, i64 %b16) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a16 to i32 - %conv2 = trunc i64 %b16 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test17(i64 %a17, i64 %b17) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a17 to i32 - %conv2 = trunc i64 %b17 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test18(i64 %a18, i64 %b18) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a18 to i32 - %conv2 = trunc i64 %b18 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test19(i64 %a19, i64 %b19) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a19 to i32 - %conv2 = trunc i64 %b19 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test20(i64 %a20, i64 %b20) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a20 to i32 - %conv2 = trunc i64 %b20 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test21(i64 %a21, i64 %b21) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a21 to i32 - %conv2 = trunc i64 %b21 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test22(i64 %a22, i64 %b22) #0 { -entry: - %0 = load i64* @c, align 8 - 
%conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a22 to i32 - %conv2 = trunc i64 %b22 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test23(i64 %a23, i64 %b23) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a23 to i32 - %conv2 = trunc i64 %b23 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test24(i64 %a24, i64 %b24) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a24 to i32 - %conv2 = trunc i64 %b24 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s0(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test25(i64 %a25, i64 %b25) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a25 to i32 - %conv2 = trunc i64 %b25 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.ll.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test26(i64 %a26, i64 %b26) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a26 to i32 - %conv2 = trunc i64 %b26 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.lh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test27(i64 %a27, i64 %b27) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a27 to i32 - %conv2 = trunc i64 %b27 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.hl.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test28(i64 %a28, i64 %b28) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a28 to i32 - %conv2 = trunc i64 %b28 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.hh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:sat - -define void @test29(i64 %a29, i64 %b29) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a29 to 
i32 - %conv2 = trunc i64 %b29 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:sat - -define void @test30(i64 %a30, i64 %b30) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a30 to i32 - %conv2 = trunc i64 %b30 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.sat.lh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:sat - -define void @test31(i64 %a31, i64 %b31) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a31 to i32 - %conv2 = trunc i64 %b31 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.sat.hl.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:sat - -define void @test32(i64 %a32, i64 %b32) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a32 to i32 - %conv2 = trunc i64 %b32 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.acc.sat.hh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test33(i64 %a33, i64 %b33) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a33 to i32 - %conv2 = trunc i64 %b33 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.ll.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test34(i64 %a34, i64 %b34) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a34 to i32 - %conv2 = trunc i64 %b34 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.lh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test35(i64 %a35, i64 %b35) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a35 to i32 - %conv2 = trunc i64 %b35 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.hl.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test36(i64 %a36, i64 %b36) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a36 to i32 - %conv2 = 
trunc i64 %b36 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.hh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:sat - -define void @test37(i64 %a37, i64 %b37) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a37 to i32 - %conv2 = trunc i64 %b37 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.sat.ll.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:sat - -define void @test38(i64 %a38, i64 %b38) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a38 to i32 - %conv2 = trunc i64 %b38 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.sat.lh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:sat - -define void @test39(i64 %a39, i64 %b39) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a39 to i32 - %conv2 = trunc i64 %b39 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.sat.hl.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:sat - -define void @test40(i64 %a40, i64 %b40) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a40 to i32 - %conv2 = trunc i64 %b40 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpy.nac.sat.hh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test41(i64 %a41, i64 %b41) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a41 to i32 - %conv2 = trunc i64 %b41 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.acc.ll.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test42(i64 %a42, i64 %b42) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a42 to i32 - %conv2 = trunc i64 %b42 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.acc.lh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test43(i64 %a43, i64 %b43) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a43 to i32 - %conv2 = trunc 
i64 %b43 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.acc.hl.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test44(i64 %a44, i64 %b44) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a44 to i32 - %conv2 = trunc i64 %b44 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.acc.hh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test45(i64 %a45, i64 %b45) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a45 to i32 - %conv2 = trunc i64 %b45 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.nac.ll.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test46(i64 %a46, i64 %b46) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a46 to i32 - %conv2 = trunc i64 %b46 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.nac.lh.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1 - -define void @test47(i64 %a47, i64 %b47) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a47 to i32 - %conv2 = trunc i64 %b47 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.nac.hl.s1(i32, i32, i32) #1 - -; CHECK: r{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1 - -define void @test48(i64 %a48, i64 %b48) #0 { -entry: - %0 = load i64* @c, align 8 - %conv = trunc i64 %0 to i32 - %conv1 = trunc i64 %a48 to i32 - %conv2 = trunc i64 %b48 to i32 - %1 = tail call i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32 %conv, i32 %conv1, i32 %conv2) - %conv3 = sext i32 %1 to i64 - store i64 %conv3, i64* @c, align 8 - ret void -} - -declare i32 @llvm.hexagon.M2.mpyu.nac.hh.s1(i32, i32, i32) #1 diff --git a/test/CodeGen/Hexagon/intrinsics-mpyd-acc.ll b/test/CodeGen/Hexagon/intrinsics-mpyd-acc.ll deleted file mode 100644 index 85b55c2736e..00000000000 --- a/test/CodeGen/Hexagon/intrinsics-mpyd-acc.ll +++ /dev/null @@ -1,390 +0,0 @@ -; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s - -; Verify that the mpy intrinsics with accumulation are lowered into -; the right instructions. These instructions have a 64-bit destination register. 
- -@c = external global i64 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test1(i32 %a1, i32 %b1) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64 %0, i32 %a1, i32 %b1) - store i64 %1, i64* @c, align 8 - ret void -} - -declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s0(i64, i32, i32) #1 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test2(i32 %a2, i32 %b2) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64 %0, i32 %a2, i32 %b2) - store i64 %1, i64* @c, align 8 - ret void -} - -declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s0(i64, i32, i32) #1 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test3(i32 %a3, i32 %b3) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64 %0, i32 %a3, i32 %b3) - store i64 %1, i64* @c, align 8 - ret void -} - -declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s0(i64, i32, i32) #1 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test4(i32 %a4, i32 %b4) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64 %0, i32 %a4, i32 %b4) - store i64 %1, i64* @c, align 8 - ret void -} - -declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s0(i64, i32, i32) #1 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test5(i32 %a5, i32 %b5) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64 %0, i32 %a5, i32 %b5) - store i64 %1, i64* @c, align 8 - ret void -} - -declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s0(i64, i32, i32) #1 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test6(i32 %a6, i32 %b6) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64 %0, i32 %a6, i32 %b6) - store i64 %1, i64* @c, align 8 - ret void -} - -declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s0(i64, i32, i32) #1 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test7(i32 %a7, i32 %b7) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64 %0, i32 %a7, i32 %b7) - store i64 %1, i64* @c, align 8 - ret void -} - -declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s0(i64, i32, i32) #1 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test8(i32 %a8, i32 %b8) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64 %0, i32 %a8, i32 %b8) - store i64 %1, i64* @c, align 8 - ret void -} - -declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s0(i64, i32, i32) #1 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l) - -define void @test9(i32 %a9, i32 %b9) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %0, i32 %a9, i32 %b9) - store i64 %1, i64* @c, align 8 - ret void -} - -declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32) #1 - -; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h) - -define void @test10(i32 %a10, i32 %b10) #0 { -entry: - %0 = load i64* @c, align 8 - %1 = tail call i64 
@llvm.hexagon.M2.mpyud.acc.lh.s0(i64 %0, i32 %a10, i32 %b10)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s0(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l)
-
-define void @test11(i32 %a11, i32 %b11) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64 %0, i32 %a11, i32 %b11)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s0(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h)
-
-define void @test12(i32 %a12, i32 %b12) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64 %0, i32 %a12, i32 %b12)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s0(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l)
-
-define void @test13(i32 %a13, i32 %b13) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64 %0, i32 %a13, i32 %b13)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s0(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h)
-
-define void @test14(i32 %a14, i32 %b14) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64 %0, i32 %a14, i32 %b14)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s0(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l)
-
-define void @test15(i32 %a15, i32 %b15) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64 %0, i32 %a15, i32 %b15)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s0(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h)
-
-define void @test16(i32 %a16, i32 %b16) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64 %0, i32 %a16, i32 %b16)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s0(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test17(i32 %a17, i32 %b17) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64 %0, i32 %a17, i32 %b17)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.acc.ll.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test18(i32 %a18, i32 %b18) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64 %0, i32 %a18, i32 %b18)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.acc.lh.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test19(i32 %a19, i32 %b19) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64 %0, i32 %a19, i32 %b19)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.acc.hl.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test20(i32 %a20, i32 %b20) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64 %0, i32 %a20, i32 %b20)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.acc.hh.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test21(i32 %a21, i32 %b21) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64 %0, i32 %a21, i32 %b21)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.nac.ll.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test22(i32 %a22, i32 %b22) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64 %0, i32 %a22, i32 %b22)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.nac.lh.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test23(i32 %a23, i32 %b23) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64 %0, i32 %a23, i32 %b23)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.nac.hl.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test24(i32 %a24, i32 %b24) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64 %0, i32 %a24, i32 %b24)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.nac.hh.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test25(i32 %a25, i32 %b25) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64 %0, i32 %a25, i32 %b25)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test26(i32 %a26, i32 %b26) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64 %0, i32 %a26, i32 %b26)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.acc.lh.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test27(i32 %a27, i32 %b27) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64 %0, i32 %a27, i32 %b27)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.acc.hl.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}+={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test28(i32 %a28, i32 %b28) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64 %0, i32 %a28, i32 %b28)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.acc.hh.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test29(i32 %a29, i32 %b29) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64 %0, i32 %a29, i32 %b29)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.nac.ll.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test30(i32 %a30, i32 %b30) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64 %0, i32 %a30, i32 %b30)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.nac.lh.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test31(i32 %a31, i32 %b31) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64 %0, i32 %a31, i32 %b31)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.nac.hl.s1(i64, i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}-={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test32(i32 %a32, i32 %b32) #0 {
-entry:
- %0 = load i64* @c, align 8
- %1 = tail call i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64 %0, i32 %a32, i32 %b32)
- store i64 %1, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.nac.hh.s1(i64, i32, i32) #1
diff --git a/test/CodeGen/Hexagon/intrinsics-mpyd.ll b/test/CodeGen/Hexagon/intrinsics-mpyd.ll
deleted file mode 100644
index d85fc7adb42..00000000000
--- a/test/CodeGen/Hexagon/intrinsics-mpyd.ll
+++ /dev/null
@@ -1,270 +0,0 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv5 < %s | FileCheck %s
-
-; Verify that the mpy intrinsics are lowered into the right instructions.
-; These instructions have a 64-bit destination register.
-
-@c = external global i64
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h)
-
-define void @test1(i32 %a1, i32 %b1) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.hh.s0(i32 %a1, i32 %b1)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.hh.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l)
-
-define void @test2(i32 %a2, i32 %b2) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.hl.s0(i32 %a2, i32 %b2)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.hl.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h)
-
-define void @test3(i32 %a3, i32 %b3) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.lh.s0(i32 %a3, i32 %b3)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.lh.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l)
-
-define void @test4(i32 %a4, i32 %b4) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a4, i32 %b4)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test5(i32 %a5, i32 %b5) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.hh.s1(i32 %a5, i32 %b5)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.hh.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test6(i32 %a6, i32 %b6) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.hl.s1(i32 %a6, i32 %b6)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.hl.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test7(i32 %a7, i32 %b7) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.lh.s1(i32 %a7, i32 %b7)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.lh.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test8(i32 %a8, i32 %b8) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.ll.s1(i32 %a8, i32 %b8)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.ll.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):rnd
-
-define void @test9(i32 %a9, i32 %b9) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32 %a9, i32 %b9)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):rnd
-
-define void @test10(i32 %a10, i32 %b10) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32 %a10, i32 %b10)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):rnd
-
-define void @test11(i32 %a11, i32 %b11) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32 %a11, i32 %b11)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):rnd
-
-define void @test12(i32 %a12, i32 %b12) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32 %a12, i32 %b12)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:rnd
-
-define void @test13(i32 %a13, i32 %b13) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32 %a13, i32 %b13)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.rnd.hh.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:rnd
-
-define void @test14(i32 %a14, i32 %b14) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32 %a14, i32 %b14)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.rnd.hl.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1:rnd
-
-define void @test15(i32 %a15, i32 %b15) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32 %a15, i32 %b15)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.rnd.lh.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpy(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1:rnd
-
-define void @test16(i32 %a16, i32 %b16) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32 %a16, i32 %b16)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyd.rnd.ll.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h)
-
-define void @test17(i32 %a17, i32 %b17) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyud.hh.s0(i32 %a17, i32 %b17)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.hh.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l)
-
-define void @test18(i32 %a18, i32 %b18) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyud.hl.s0(i32 %a18, i32 %b18)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.hl.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h)
-
-define void @test19(i32 %a19, i32 %b19) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyud.lh.s0(i32 %a19, i32 %b19)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.lh.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l)
-
-define void @test20(i32 %a20, i32 %b20) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyud.ll.s0(i32 %a20, i32 %b20)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.ll.s0(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test21(i32 %a21, i32 %b21) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyud.hh.s1(i32 %a21, i32 %b21)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.hh.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.h{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test22(i32 %a22, i32 %b22) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyud.hl.s1(i32 %a22, i32 %b22)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.hl.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.h):<<1
-
-define void @test23(i32 %a23, i32 %b23) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyud.lh.s1(i32 %a23, i32 %b23)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.lh.s1(i32, i32) #1
-
-; CHECK: r{{[0-9]+}}:{{[0-9]+}}{{ *}}={{ *}}mpyu(r{{[0-9]+}}.l{{ *}},{{ *}}r{{[0-9]+}}.l):<<1
-
-define void @test24(i32 %a24, i32 %b24) #0 {
-entry:
- %0 = tail call i64 @llvm.hexagon.M2.mpyud.ll.s1(i32 %a24, i32 %b24)
- store i64 %0, i64* @c, align 8
- ret void
-}
-
-declare i64 @llvm.hexagon.M2.mpyud.ll.s1(i32, i32) #1
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
new file mode 100644
index 00000000000..6ff93870818
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_alu.ll
@@ -0,0 +1,136 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.1.1 ALU32/ALU
+
+; Add
+declare i32 @llvm.hexagon.A2.addi(i32, i32)
+define i32 @A2_addi(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.addi(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, #0)
+
+declare i32 @llvm.hexagon.A2.add(i32, i32)
+define i32 @A2_add(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.add(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, r1)
+
+declare i32 @llvm.hexagon.A2.addsat(i32, i32)
+define i32 @A2_addsat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.addsat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = add(r0, r1):sat
+
+; Logical operations
+declare i32 @llvm.hexagon.A2.and(i32, i32)
+define i32 @A2_and(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.and(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = and(r0, r1)
+
+declare i32 @llvm.hexagon.A2.or(i32, i32)
+define i32 @A2_or(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.or(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = or(r0, r1)
+
+declare i32 @llvm.hexagon.A2.xor(i32, i32)
+define i32 @A2_xor(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.xor(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = xor(r0, r1)
+
+declare i32 @llvm.hexagon.A4.andn(i32, i32)
+define i32 @A4_andn(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.andn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = and(r0, ~r1)
+
+declare i32 @llvm.hexagon.A4.orn(i32, i32)
+define i32 @A4_orn(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A4.orn(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = or(r0, ~r1)
+
+; Nop
+declare void @llvm.hexagon.A2.nop()
+define void @A2_nop(i32 %a, i32 %b) {
+ call void @llvm.hexagon.A2.nop()
+ ret void
+}
+; CHECK: nop
+
+; Subtract
+declare i32 @llvm.hexagon.A2.sub(i32, i32)
+define i32 @A2_sub(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.sub(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0, r1)
+
+declare i32 @llvm.hexagon.A2.subsat(i32, i32)
+define i32 @A2_subsat(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.subsat(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = sub(r0, r1):sat
+
+; Sign extend
+declare i32 @llvm.hexagon.A2.sxtb(i32)
+define i32 @A2_sxtb(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.sxtb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sxtb(r0)
+
+declare i32 @llvm.hexagon.A2.sxth(i32)
+define i32 @A2_sxth(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.sxth(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sxth(r0)
+
+; Transfer immediate
+declare i32 @llvm.hexagon.A2.tfril(i32, i32)
+define i32 @A2_tfril(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.tfril(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0.l = #0
+
+declare i32 @llvm.hexagon.A2.tfrih(i32, i32)
+define i32 @A2_tfrih(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.tfrih(i32 %a, i32 0)
+ ret i32 %z
+}
+; CHECK: r0.h = #0
+
+declare i32 @llvm.hexagon.A2.tfrsi(i32)
+define i32 @A2_tfrsi() {
+ %z = call i32 @llvm.hexagon.A2.tfrsi(i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = #0
+
+; Transfer register
+declare i32 @llvm.hexagon.A2.tfr(i32)
+define i32 @A2_tfr(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.tfr(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = r0
+
+; Zero extend
+declare i32 @llvm.hexagon.A2.zxth(i32)
+define i32 @A2_zxth(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.zxth(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = zxth(r0)
diff --git a/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
new file mode 100644
index 00000000000..2edb5fd2d39
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/alu32_perm.ll
@@ -0,0 +1,83 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.1.2 ALU32/PERM
+
+; Combine words into doubleword
+declare i64 @llvm.hexagon.A2.combineii(i32, i32)
+define i64 @A2_combineii() {
+ %z = call i64 @llvm.hexagon.A2.combineii(i32 0, i32 0)
+ ret i64 %z
+}
+; CHECK: r1:0 = combine(#0, #0)
+
+declare i32 @llvm.hexagon.A2.combine.hh(i32, i32)
+define i32 @A2_combine_hh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.hh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.h, r1.h)
+
+declare i32 @llvm.hexagon.A2.combine.hl(i32, i32)
+define i32 @A2_combine_hl(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.hl(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.h, r1.l)
+
+declare i32 @llvm.hexagon.A2.combine.lh(i32, i32)
+define i32 @A2_combine_lh(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.lh(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.l, r1.h)
+
+declare i32 @llvm.hexagon.A2.combine.ll(i32, i32)
+define i32 @A2_combine_ll(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.A2.combine.ll(i32 %a, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = combine(r0.l, r1.l)
+
+declare i64 @llvm.hexagon.A2.combinew(i32, i32)
+define i64 @A2_combinew(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.A2.combinew(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = combine(r0, r1)
+
+; Mux
+declare i32 @llvm.hexagon.C2.muxri(i32, i32, i32)
+define i32 @C2_muxri(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.muxri(i32 %a, i32 0, i32 %b)
+ ret i32 %z
+}
+; CHECK: r0 = mux(p0, #0, r1)
+
+declare i32 @llvm.hexagon.C2.muxir(i32, i32, i32)
+define i32 @C2_muxir(i32 %a, i32 %b) {
+ %z = call i32 @llvm.hexagon.C2.muxir(i32 %a, i32 %b, i32 0)
+ ret i32 %z
+}
+; CHECK: r0 = mux(p0, r1, #0)
+
+; Shift word by 16
+declare i32 @llvm.hexagon.A2.aslh(i32)
+define i32 @A2_aslh(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.aslh(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = aslh(r0)
+
+declare i32 @llvm.hexagon.A2.asrh(i32)
+define i32 @A2_asrh(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.asrh(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = asrh(r0)
+
+; Pack high and low halfwords
+declare i64 @llvm.hexagon.S2.packhl(i32, i32)
+define i64 @S2_packhl(i32 %a, i32 %b) {
+ %z = call i64 @llvm.hexagon.S2.packhl(i32 %a, i32 %b)
+ ret i64 %z
+}
+; CHECK: r1:0 = packhl(r0, r1)
diff --git a/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
new file mode 100644
index 00000000000..7e463cfc9fe
--- /dev/null
+++ b/test/CodeGen/Hexagon/intrinsics/xtype_perm.ll
@@ -0,0 +1,46 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; Hexagon Programmer's Reference Manual 11.10.6 XTYPE/PERM
+
+; Saturate
+declare i32 @llvm.hexagon.A2.sat(i64)
+define i32 @A2_sat(i64 %a) {
+ %z = call i32 @llvm.hexagon.A2.sat(i64 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sat(r1:0)
+
+declare i32 @llvm.hexagon.A2.sath(i32)
+define i32 @A2_sath(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.sath(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = sath(r0)
+
+declare i32 @llvm.hexagon.A2.satuh(i32)
+define i32 @A2_satuh(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.satuh(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = satuh(r0)
+
+declare i32 @llvm.hexagon.A2.satub(i32)
+define i32 @A2_satub(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.satub(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = satub(r0)
+
+declare i32 @llvm.hexagon.A2.satb(i32)
+define i32 @A2_satb(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.satb(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = satb(r0)
+
+; Swizzle bytes
+declare i32 @llvm.hexagon.A2.swiz(i32)
+define i32 @A2_swiz(i32 %a) {
+ %z = call i32 @llvm.hexagon.A2.swiz(i32 %a)
+ ret i32 %z
+}
+; CHECK: r0 = swiz(r0)
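
For reference only (not part of the patch): a minimal sketch of how the mpyd/mpyud coverage deleted above would be written in the new reference-manual layout. The file name (e.g. xtype_mpy.ll), the manual section number, and the exact r0/r1 register assignment that llc -O0 would print in the CHECK lines are assumptions here, not verified output; only the intrinsic names and the overall test pattern are taken from this change.

; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
; Hexagon Programmer's Reference Manual, XTYPE/MPY (section number assumed)

; Multiply halfwords, 64-bit result
declare i64 @llvm.hexagon.M2.mpyd.ll.s0(i32, i32)
define i64 @M2_mpyd_ll_s0(i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyd.ll.s0(i32 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 = mpy(r0.l, r1.l)

; Multiply unsigned halfwords, accumulating into a 64-bit register pair
declare i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64, i32, i32)
define i64 @M2_mpyud_acc_ll_s0(i64 %c, i32 %a, i32 %b) {
  %z = call i64 @llvm.hexagon.M2.mpyud.acc.ll.s0(i64 %c, i32 %a, i32 %b)
  ret i64 %z
}
; CHECK: r1:0 += mpyu(r2.l, r3.l)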