summaryrefslogtreecommitdiffstats
path: root/test/Transforms/Inline/inline_cleanup.ll
diff options
context:
space:
mode:
authorTanya Lattner <tonic@nondot.org>2008-03-01 09:15:35 +0000
committerTanya Lattner <tonic@nondot.org>2008-03-01 09:15:35 +0000
commitec9a35a6f9143cfa325e0413cc297c48f627973a (patch)
treec1b0621415acc20f94152c51174d53fae3dbc8cc /test/Transforms/Inline/inline_cleanup.ll
parentab3b77834c9232e4c13acb29afe1920b97c5a20b (diff)
downloadexternal_llvm-ec9a35a6f9143cfa325e0413cc297c48f627973a.zip
external_llvm-ec9a35a6f9143cfa325e0413cc297c48f627973a.tar.gz
external_llvm-ec9a35a6f9143cfa325e0413cc297c48f627973a.tar.bz2
Remove llvm-upgrade and update test cases.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@47793 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/Transforms/Inline/inline_cleanup.ll')
-rw-r--r-- test/Transforms/Inline/inline_cleanup.ll | 67
1 files changed, 32 insertions, 35 deletions
diff --git a/test/Transforms/Inline/inline_cleanup.ll b/test/Transforms/Inline/inline_cleanup.ll
index 2ec67ff..89b3a82 100644
--- a/test/Transforms/Inline/inline_cleanup.ll
+++ b/test/Transforms/Inline/inline_cleanup.ll
@@ -1,66 +1,63 @@
; Test that the inliner doesn't leave around dead allocas, and that it folds
; uncond branches away after it is done specializing.
-; RUN: llvm-upgrade < %s | llvm-as | opt -inline | llvm-dis | \
+; RUN: llvm-as < %s | opt -inline | llvm-dis | \
; RUN: not grep {alloca.*uses=0}
-; RUN: llvm-upgrade < %s | llvm-as | opt -inline | llvm-dis | \
+; RUN: llvm-as < %s | opt -inline | llvm-dis | \
; RUN: not grep {br label}
+@A = weak global i32 0 ; <i32*> [#uses=1]
+@B = weak global i32 0 ; <i32*> [#uses=1]
+@C = weak global i32 0 ; <i32*> [#uses=1]
-%A = weak global int 0 ; <int*> [#uses=1]
-%B = weak global int 0 ; <int*> [#uses=1]
-%C = weak global int 0 ; <int*> [#uses=1]
-
-implementation ; Functions:
-
-internal fastcc void %foo(int %X) {
+define internal fastcc void @foo(i32 %X) {
entry:
- %ALL = alloca int, align 4 ; <int*> [#uses=1]
- %tmp1 = and int %X, 1 ; <int> [#uses=1]
- %tmp1 = seteq int %tmp1, 0 ; <bool> [#uses=1]
- br bool %tmp1, label %cond_next, label %cond_true
+ %ALL = alloca i32, align 4 ; <i32*> [#uses=1]
+ %tmp1 = and i32 %X, 1 ; <i32> [#uses=1]
+ %tmp1.upgrd.1 = icmp eq i32 %tmp1, 0 ; <i1> [#uses=1]
+ br i1 %tmp1.upgrd.1, label %cond_next, label %cond_true
cond_true: ; preds = %entry
- store int 1, int* %A
+ store i32 1, i32* @A
br label %cond_next
-cond_next: ; preds = %entry, %cond_true
- %tmp4 = and int %X, 2 ; <int> [#uses=1]
- %tmp4 = seteq int %tmp4, 0 ; <bool> [#uses=1]
- br bool %tmp4, label %cond_next7, label %cond_true5
+cond_next: ; preds = %cond_true, %entry
+ %tmp4 = and i32 %X, 2 ; <i32> [#uses=1]
+ %tmp4.upgrd.2 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
+ br i1 %tmp4.upgrd.2, label %cond_next7, label %cond_true5
cond_true5: ; preds = %cond_next
- store int 1, int* %B
+ store i32 1, i32* @B
br label %cond_next7
-cond_next7: ; preds = %cond_next, %cond_true5
- %tmp10 = and int %X, 4 ; <int> [#uses=1]
- %tmp10 = seteq int %tmp10, 0 ; <bool> [#uses=1]
- br bool %tmp10, label %cond_next13, label %cond_true11
+cond_next7: ; preds = %cond_true5, %cond_next
+ %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
+ %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
+ br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
cond_true11: ; preds = %cond_next7
- store int 1, int* %C
+ store i32 1, i32* @C
br label %cond_next13
-cond_next13: ; preds = %cond_next7, %cond_true11
- %tmp16 = and int %X, 8 ; <int> [#uses=1]
- %tmp16 = seteq int %tmp16, 0 ; <bool> [#uses=1]
- br bool %tmp16, label %UnifiedReturnBlock, label %cond_true17
+cond_next13: ; preds = %cond_true11, %cond_next7
+ %tmp16 = and i32 %X, 8 ; <i32> [#uses=1]
+ %tmp16.upgrd.4 = icmp eq i32 %tmp16, 0 ; <i1> [#uses=1]
+ br i1 %tmp16.upgrd.4, label %UnifiedReturnBlock, label %cond_true17
cond_true17: ; preds = %cond_next13
- call void %ext( int* %ALL )
+ call void @ext( i32* %ALL )
ret void
UnifiedReturnBlock: ; preds = %cond_next13
ret void
}
-declare void %ext(int*)
+declare void @ext(i32*)
-void %test() {
+define void @test() {
entry:
- tail call fastcc void %foo( int 1 )
- tail call fastcc void %foo( int 2 )
- tail call fastcc void %foo( int 3 )
- tail call fastcc void %foo( int 8 )
+ tail call fastcc void @foo( i32 1 )
+ tail call fastcc void @foo( i32 2 )
+ tail call fastcc void @foo( i32 3 )
+ tail call fastcc void @foo( i32 8 )
ret void
}