; RUN: llc < %s -mtriple=thumb-linux-unknown-gnueabi -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=Thumb-linux
; RUN: llc < %s -mtriple=thumb-linux-androideabi -segmented-stacks -verify-machineinstrs | FileCheck %s -check-prefix=Thumb-android

; Also make sure that object file emission does not crash.
; RUN: llc < %s -mtriple=thumb-linux-unknown-gnueabi -segmented-stacks -filetype=obj
; RUN: llc < %s -mtriple=thumb-linux-androideabi -segmented-stacks -filetype=obj

; Just to prevent the alloca from being optimized away
declare void @dummy_use(i32*, i32)

define i32 @test_basic(i32 %l) {
        %mem = alloca i32, i32 %l
        call void @dummy_use (i32* %mem, i32 %l)
        %terminate = icmp eq i32 %l, 0
        br i1 %terminate, label %true, label %false

true:
        ret i32 0

false:
        %newlen = sub i32 %l, 1
        %retvalue = call i32 @test_basic(i32 %newlen)
        ret i32 %retvalue

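; The checks below describe the segmented-stack prologue: the stack limit is
; loaded from a literal pool, compared against sp, and __morestack is called
; with the frame size (#16) in r4 and the stack-passed-argument size (#0) in r5
; when a new stack segment is needed.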
; Thumb-linux:      test_basic:
; Thumb-linux:      push {r4, r5}
; Thumb-linux:      mov r5, sp
; Thumb-linux-NEXT: ldr r4, .LCPI0_0
; Thumb-linux-NEXT: ldr r4, [r4]
; Thumb-linux-NEXT: cmp r4, r5
; Thumb-linux-NEXT: blo .LBB0_2
; Thumb-linux:      mov r4, #16
; Thumb-linux-NEXT: mov r5, #0
; Thumb-linux-NEXT: push {lr}
; Thumb-linux-NEXT: bl __morestack
; Thumb-linux-NEXT: pop {r4}
; Thumb-linux-NEXT: mov lr, r4
; Thumb-linux-NEXT: pop {r4, r5}
; Thumb-linux-NEXT: bx lr
; Thumb-linux:      pop {r4, r5}

; Thumb-android:      test_basic:
; Thumb-android:      push {r4, r5}
; Thumb-android:      mov r5, sp
; Thumb-android-NEXT: ldr r4, .LCPI0_0
; Thumb-android-NEXT: ldr r4, [r4]
; Thumb-android-NEXT: cmp r4, r5
; Thumb-android-NEXT: blo .LBB0_2
; Thumb-android:      mov r4, #16
; Thumb-android-NEXT: mov r5, #0
; Thumb-android-NEXT: push {lr}
; Thumb-android-NEXT: bl __morestack
; Thumb-android-NEXT: pop {r4}
; Thumb-android-NEXT: mov lr, r4
; Thumb-android-NEXT: pop {r4, r5}
; Thumb-android-NEXT: bx lr
; Thumb-android:      pop {r4, r5}
}