; Store [[VA]]
; O32-DAG: sw [[VA]], 0([[SP]])
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
; Increment [[VA]]
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
; ALL-DAG: sh [[ARG1]], 2([[GV]])
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
; Increment [[VA]] again.
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- call void asm sideeffect "# ANCHOR1", ""()
+ call void asm sideeffect "teqi $$zero, 1", ""()
%arg1 = va_arg i8** %ap, i16
%e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
store volatile i16 %arg1, i16* %e1, align 2
- call void asm sideeffect "# ANCHOR2", ""()
+ call void asm sideeffect "teqi $$zero, 2", ""()
%arg2 = va_arg i8** %ap, i16
%e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
store volatile i16 %arg2, i16* %e2, align 2
; Store [[VA]]
; O32-DAG: sw [[VA]], 0([[SP]])
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
; Increment [[VA]]
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
; ALL-DAG: sw [[ARG1]], 4([[GV]])
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
; Increment [[VA]] again.
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- call void asm sideeffect "# ANCHOR1", ""()
+ call void asm sideeffect "teqi $$zero, 1", ""()
%arg1 = va_arg i8** %ap, i32
%e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
store volatile i32 %arg1, i32* %e1, align 4
- call void asm sideeffect "# ANCHOR2", ""()
+ call void asm sideeffect "teqi $$zero, 2", ""()
%arg2 = va_arg i8** %ap, i32
%e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
store volatile i32 %arg2, i32* %e2, align 4
; Store [[VA]]
; O32-DAG: sw [[VA]], 0([[SP]])
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
; Increment [[VA]] (and realign pointer for O32)
; O32: lw [[VA:\$[0-9]+]], 0([[SP]])
; NEW-DAG: ld [[ARG1:\$[0-9]+]], 0([[VA]])
; NEW-DAG: sd [[ARG1]], 8([[GV]])
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
; Increment [[VA]] again.
; FIXME: We're still 8-byte aligned from the previous va_arg, but CodeGen doesn't spot that.
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- call void asm sideeffect "# ANCHOR1", ""()
+ call void asm sideeffect "teqi $$zero, 1", ""()
%arg1 = va_arg i8** %ap, i64
%e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
store volatile i64 %arg1, i64* %e1, align 8
- call void asm sideeffect "# ANCHOR2", ""()
+ call void asm sideeffect "teqi $$zero, 2", ""()
%arg2 = va_arg i8** %ap, i64
%e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
store volatile i64 %arg2, i64* %e2, align 8
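; The O32 realignment referred to above is a round-up-to-8 of the va pointer
; before an i64 is read. A minimal sketch of that computation in IR (value
; names are illustrative, not taken from the generated code):
;   %addr    = ptrtoint i8* %ap.cur to i32
;   %plus7   = add i32 %addr, 7
;   %aligned = and i32 %plus7, -8
;   %ap.cur2 = inttoptr i32 %aligned to i8*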
; Store [[VA]]
; O32-DAG: sw [[VA]], 0([[SP]])
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
; Increment [[VA]]
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
; ALL-DAG: sh [[ARG1]], 2([[GV]])
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
; Increment [[VA]] again.
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- call void asm sideeffect "# ANCHOR1", ""()
+ call void asm sideeffect "teqi $$zero, 1", ""()
%arg1 = va_arg i8** %ap, i16
%e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
store volatile i16 %arg1, i16* %e1, align 2
- call void asm sideeffect "# ANCHOR2", ""()
+ call void asm sideeffect "teqi $$zero, 2", ""()
%arg2 = va_arg i8** %ap, i16
%e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
store volatile i16 %arg2, i16* %e2, align 2
; Store [[VA]]
; O32-DAG: sw [[VA]], 0([[SP]])
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
; Increment [[VA]]
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
; ALL-DAG: sw [[ARG1]], 4([[GV]])
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
; Increment [[VA]] again.
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- call void asm sideeffect "# ANCHOR1", ""()
+ call void asm sideeffect "teqi $$zero, 1", ""()
%arg1 = va_arg i8** %ap, i32
%e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
store volatile i32 %arg1, i32* %e1, align 4
- call void asm sideeffect "# ANCHOR2", ""()
+ call void asm sideeffect "teqi $$zero, 2", ""()
%arg2 = va_arg i8** %ap, i32
%e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
store volatile i32 %arg2, i32* %e2, align 4
; Store [[VA]]
; O32-DAG: sw [[VA]], 0([[SP]])
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
; Increment [[VA]] (and realign pointer for O32)
; O32: lw [[VA:\$[0-9]+]], 0([[SP]])
; NEW-DAG: ld [[ARG1:\$[0-9]+]], 0([[VA]])
; NEW-DAG: sd [[ARG1]], 8([[GV]])
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
; Increment [[VA]] again.
; FIXME: We're still 8-byte aligned from the previous va_arg, but CodeGen doesn't spot that.
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- call void asm sideeffect "# ANCHOR1", ""()
+ call void asm sideeffect "teqi $$zero, 1", ""()
%arg1 = va_arg i8** %ap, i64
%e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
store volatile i64 %arg1, i64* %e1, align 8
- call void asm sideeffect "# ANCHOR2", ""()
+ call void asm sideeffect "teqi $$zero, 2", ""()
%arg2 = va_arg i8** %ap, i64
%e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
store volatile i64 %arg2, i64* %e2, align 8
; Store [[VA]]
; O32-DAG: sw [[VA]], 0([[SP]])
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
; Increment [[VA]]
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
; ALL-DAG: sh [[ARG1]], 2([[GV]])
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
; Increment [[VA]] again.
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- call void asm sideeffect "# ANCHOR1", ""()
+ call void asm sideeffect "teqi $$zero, 1", ""()
%arg1 = va_arg i8** %ap, i16
%e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
store volatile i16 %arg1, i16* %e1, align 2
- call void asm sideeffect "# ANCHOR2", ""()
+ call void asm sideeffect "teqi $$zero, 2", ""()
%arg2 = va_arg i8** %ap, i16
%e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
store volatile i16 %arg2, i16* %e2, align 2
; Store [[VA]]
; O32-DAG: sw [[VA]], 0([[SP]])
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
; Increment [[VA]]
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
; ALL-DAG: sw [[ARG1]], 4([[GV]])
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
; Increment [[VA]] again.
; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- call void asm sideeffect "# ANCHOR1", ""()
+ call void asm sideeffect "teqi $$zero, 1", ""()
%arg1 = va_arg i8** %ap, i32
%e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
store volatile i32 %arg1, i32* %e1, align 4
- call void asm sideeffect "# ANCHOR2", ""()
+ call void asm sideeffect "teqi $$zero, 2", ""()
%arg2 = va_arg i8** %ap, i32
%e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
store volatile i32 %arg2, i32* %e2, align 4
; Store [[VA]]
; O32-DAG: sw [[VA]], 0([[SP]])
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
; Increment [[VA]] (and realign pointer for O32)
; O32: lw [[VA:\$[0-9]+]], 0([[SP]])
; NEW-DAG: ld [[ARG1:\$[0-9]+]], 0([[VA]])
; NEW-DAG: sd [[ARG1]], 8([[GV]])
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
; Increment [[VA]] again.
; FIXME: We're still 8-byte aligned from the previous va_arg, but CodeGen doesn't spot that.
%ap2 = bitcast i8** %ap to i8*
call void @llvm.va_start(i8* %ap2)
- call void asm sideeffect "# ANCHOR1", ""()
+ call void asm sideeffect "teqi $$zero, 1", ""()
%arg1 = va_arg i8** %ap, i64
%e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
store volatile i64 %arg1, i64* %e1, align 8
- call void asm sideeffect "# ANCHOR2", ""()
+ call void asm sideeffect "teqi $$zero, 2", ""()
%arg2 = va_arg i8** %ap, i64
%e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
store volatile i64 %arg2, i64* %e2, align 8
; On the other hand, if odd single-precision registers are not permitted, the
; compiler must copy $f13 to an even-numbered register before inserting it
; into the vector.
- call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
%1 = insertelement <4 x float> %0, float %b, i32 0
store <4 x float> %1, <4 x float>* @v4f32
ret void
; NOODDSPREG: mov.s $f[[F0:[0-9]+]], $f13
; NOODDSPREG: insve.w $w[[W0]][0], $w[[F0]][0]
; ODDSPREG: insve.w $w[[W0]][0], $w13[0]
-; ALL: # Clobber
+; ALL: teqi $zero, 1
; ALL-NOT: sdc1
; ALL-NOT: ldc1
; ALL: st.w $w[[W0]], 0($[[R0]])
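; To summarise the two lowerings checked above: with odd single-precision
; registers available, the insert can read $w13 (whose low word aliases $f13)
; directly; without them, one extra move is needed first. Schematically
; (register numbers are illustrative, the checks only pin the pattern):
;   ODDSPREG:    insve.w $w12[0], $w13[0]
;   NOODDSPREG:  mov.s   $f10, $f13          # copy to an even-numbered register
;                insve.w $w12[0], $w10[0]    # then insert from its MSA alias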
; On the other hand, if odd single-precision registers are not permitted, the
; compiler must copy $f13 to an even-numbered register before inserting it
; into the vector.
- call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
%1 = insertelement <4 x float> %0, float %b, i32 1
store <4 x float> %1, <4 x float>* @v4f32
ret void
; NOODDSPREG: mov.s $f[[F0:[0-9]+]], $f13
; NOODDSPREG: insve.w $w[[W0]][1], $w[[F0]][0]
; ODDSPREG: insve.w $w[[W0]][1], $w13[0]
-; ALL: # Clobber
+; ALL: teqi $zero, 1
; ALL-NOT: sdc1
; ALL-NOT: ldc1
; ALL: st.w $w[[W0]], 0($[[R0]])
;
; On the other hand, if odd single-precision registers are not permitted, the
; value must be moved to $f12/$w12.
- call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
%2 = extractelement <4 x float> %1, i32 0
ret float %2
; ALL: ld.w $w12, 0($[[R0]])
; ALL: move.v $w[[W0:13]], $w12
; NOODDSPREG: move.v $w[[W0:12]], $w13
-; ALL: # Clobber
+; ALL: teqi $zero, 1
; ALL-NOT: st.w
; ALL-NOT: ld.w
; ALL: mov.s $f0, $f[[W0]]
;
; On the other hand, if odd single-precision registers are not permitted, the
; value must be spilled.
- call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+ call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
%2 = extractelement <4 x float> %1, i32 1
ret float %2
; NOODDSPREG: st.w $w[[W0]], 0($sp)
; ODDSPREG-NOT: st.w
; ODDSPREG-NOT: ld.w
-; ALL: # Clobber
+; ALL: teqi $zero, 1
; ODDSPREG-NOT: st.w
; ODDSPREG-NOT: ld.w
; NOODDSPREG: ld.w $w0, 0($sp)
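; Schematically, the NOODDSPREG path above brackets the clobber with a spill
; and a reload, so no FPU register has to survive the asm (register numbers
; and the stack offset are illustrative):
;   st.w $w12, 0($sp)    # spill the live vector before the clobber
;   teqi $zero, 1        # the asm clobbers every other FPU register
;   ld.w $w0, 0($sp)     # reload into $w0, placing the result in $f0 (its lane 0)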