Diffstat (limited to 'test/Transforms/SROA')
-rw-r--r--  test/Transforms/SROA/alignment.ll         |  13
-rw-r--r--  test/Transforms/SROA/basictest.ll         | 155
-rw-r--r--  test/Transforms/SROA/vector-promotion.ll  |  19
3 files changed, 182 insertions, 5 deletions
diff --git a/test/Transforms/SROA/alignment.ll b/test/Transforms/SROA/alignment.ll
index 5fa78766..4f4e40c 100644
--- a/test/Transforms/SROA/alignment.ll
+++ b/test/Transforms/SROA/alignment.ll
@@ -85,15 +85,18 @@ entry:
}
define void @test5() {
-; Test that we preserve underaligned loads and stores when splitting.
+; Test that we preserve underaligned loads and stores when splitting. The use
+; of volatile in this test case is just to force the loads and stores to not be
+; split or promoted out of existence.
+;
; CHECK-LABEL: @test5(
; CHECK: alloca [9 x i8]
; CHECK: alloca [9 x i8]
; CHECK: store volatile double 0.0{{.*}}, double* %{{.*}}, align 1
-; CHECK: load i16* %{{.*}}, align 1
+; CHECK: load volatile i16* %{{.*}}, align 1
; CHECK: load double* %{{.*}}, align 1
; CHECK: store volatile double %{{.*}}, double* %{{.*}}, align 1
-; CHECK: load i16* %{{.*}}, align 1
+; CHECK: load volatile i16* %{{.*}}, align 1
; CHECK: ret void
entry:
@@ -103,7 +106,7 @@ entry:
store volatile double 0.0, double* %ptr1, align 1
%weird_gep1 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 7
%weird_cast1 = bitcast i8* %weird_gep1 to i16*
- %weird_load1 = load i16* %weird_cast1, align 1
+ %weird_load1 = load volatile i16* %weird_cast1, align 1
%raw2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 9
%ptr2 = bitcast i8* %raw2 to double*
@@ -111,7 +114,7 @@ entry:
store volatile double %d1, double* %ptr2, align 1
%weird_gep2 = getelementptr inbounds [18 x i8]* %a, i32 0, i32 16
%weird_cast2 = bitcast i8* %weird_gep2 to i16*
- %weird_load2 = load i16* %weird_cast2, align 1
+ %weird_load2 = load volatile i16* %weird_cast2, align 1
ret void
}
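
A hypothetical C fragment, not part of this commit, illustrating how the kind of align-1 accesses checked in @test5 above can arise: members of a packed struct are only byte-aligned, so a frontend must emit underaligned loads and stores. The struct layout and function name are illustrative assumptions only.

/* Hypothetical illustration only: packed struct members are merely
 * byte-aligned, so the frontend emits loads and stores with "align 1",
 * analogous to the underaligned accesses @test5 expects SROA to keep. */
struct __attribute__((packed)) wire {
  char tag;        /* offset 0 */
  double payload;  /* offset 1: only byte-aligned */
  short extra;     /* offset 9: only byte-aligned */
};

double read_payload(struct wire *w) {
  w->extra = 7;        /* store i16 ..., align 1   */
  return w->payload;   /* load double ..., align 1 */
}
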
diff --git a/test/Transforms/SROA/basictest.ll b/test/Transforms/SROA/basictest.ll
index dc2b165..e3f762a 100644
--- a/test/Transforms/SROA/basictest.ll
+++ b/test/Transforms/SROA/basictest.ll
@@ -1440,3 +1440,158 @@ entry:
ret void
}
+define float @test25() {
+; Check that we split up stores in order to promote the smaller SSA values. These
+; types of patterns can arise because LLVM maps small memcpys to integer loads and
+; stores. If we get a memcpy of an aggregate (such as C and C++ frontends would
+; produce, but so might any language frontend), this will in many cases turn into
+; an integer load and store. SROA needs to be extremely powerful to correctly
+; handle these cases and form splittable and promotable SSA values.
+;
+; CHECK-LABEL: @test25(
+; CHECK-NOT: alloca
+; CHECK: %[[F1:.*]] = bitcast i32 0 to float
+; CHECK: %[[F2:.*]] = bitcast i32 1065353216 to float
+; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
+; CHECK: ret float %[[SUM]]
+
+entry:
+ %a = alloca i64
+ %b = alloca i64
+ %a.cast = bitcast i64* %a to [2 x float]*
+ %a.gep1 = getelementptr [2 x float]* %a.cast, i32 0, i32 0
+ %a.gep2 = getelementptr [2 x float]* %a.cast, i32 0, i32 1
+ %b.cast = bitcast i64* %b to [2 x float]*
+ %b.gep1 = getelementptr [2 x float]* %b.cast, i32 0, i32 0
+ %b.gep2 = getelementptr [2 x float]* %b.cast, i32 0, i32 1
+ store float 0.0, float* %a.gep1
+ store float 1.0, float* %a.gep2
+ %v = load i64* %a
+ store i64 %v, i64* %b
+ %f1 = load float* %b.gep1
+ %f2 = load float* %b.gep2
+ %ret = fadd float %f1, %f2
+ ret float %ret
+}
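
A hypothetical C function, not part of this commit, showing how the pattern in @test25 commonly arises: the aggregate copy is emitted as a small memcpy, which the optimizer rewrites into a single i64 load and store that SROA must split back into the two float values. The struct and function names are assumptions.

/* Hypothetical illustration only: a small aggregate copy becomes a memcpy,
 * which is then lowered to one i64 load/store pair, the shape @test25 above
 * splits apart so the float pieces can be promoted to SSA values. */
struct pair { float a, b; };

float sum_copy(void) {
  struct pair x = { 0.0f, 1.0f };
  struct pair y = x;        /* aggregate copy: memcpy -> i64 load + store */
  return y.a + y.b;         /* after SROA: plain float SSA arithmetic     */
}
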
+
+@complex1 = external global [2 x float]
+@complex2 = external global [2 x float]
+
+define void @test26() {
+; Test a case of splitting up loads and stores against globals.
+;
+; CHECK-LABEL: @test26(
+; CHECK-NOT: alloca
+; CHECK: %[[L1:.*]] = load i32* bitcast
+; CHECK: %[[L2:.*]] = load i32* bitcast
+; CHECK: %[[F1:.*]] = bitcast i32 %[[L1]] to float
+; CHECK: %[[F2:.*]] = bitcast i32 %[[L2]] to float
+; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
+; CHECK: %[[C1:.*]] = bitcast float %[[SUM]] to i32
+; CHECK: %[[C2:.*]] = bitcast float %[[SUM]] to i32
+; CHECK: store i32 %[[C1]], i32* bitcast
+; CHECK: store i32 %[[C2]], i32* bitcast
+; CHECK: ret void
+
+entry:
+ %a = alloca i64
+ %a.cast = bitcast i64* %a to [2 x float]*
+ %a.gep1 = getelementptr [2 x float]* %a.cast, i32 0, i32 0
+ %a.gep2 = getelementptr [2 x float]* %a.cast, i32 0, i32 1
+ %v1 = load i64* bitcast ([2 x float]* @complex1 to i64*)
+ store i64 %v1, i64* %a
+ %f1 = load float* %a.gep1
+ %f2 = load float* %a.gep2
+ %sum = fadd float %f1, %f2
+ store float %sum, float* %a.gep1
+ store float %sum, float* %a.gep2
+ %v2 = load i64* %a
+ store i64 %v2, i64* bitcast ([2 x float]* @complex2 to i64*)
+ ret void
+}
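
Similarly, a hypothetical C source, not part of this commit, for the shape @test26 exercises: a two-float pair copied in from one global and back out to another. The struct and function names are assumptions.

/* Hypothetical illustration only: copying a two-float pair in from a global
 * and back out to another lowers to i64 loads/stores of the globals, which
 * @test26 above shows being split into the two i32 (float) halves. */
struct cfloat { float re, im; };
extern struct cfloat complex1, complex2;

void fold_complex(void) {
  struct cfloat tmp = complex1;   /* i64 load from @complex1 */
  float s = tmp.re + tmp.im;
  tmp.re = s;
  tmp.im = s;
  complex2 = tmp;                 /* i64 store to @complex2  */
}
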
+
+define float @test27() {
+; Another, more complex case of splittable i64 loads and stores. This example
+; is a particularly challenging one because the load and store both point into
+; the alloca SROA is processing, and they overlap but at an offset.
+;
+; CHECK-LABEL: @test27(
+; CHECK-NOT: alloca
+; CHECK: %[[F1:.*]] = bitcast i32 0 to float
+; CHECK: %[[F2:.*]] = bitcast i32 1065353216 to float
+; CHECK: %[[SUM:.*]] = fadd float %[[F1]], %[[F2]]
+; CHECK: ret float %[[SUM]]
+
+entry:
+ %a = alloca [12 x i8]
+ %gep1 = getelementptr [12 x i8]* %a, i32 0, i32 0
+ %gep2 = getelementptr [12 x i8]* %a, i32 0, i32 4
+ %gep3 = getelementptr [12 x i8]* %a, i32 0, i32 8
+ %iptr1 = bitcast i8* %gep1 to i64*
+ %iptr2 = bitcast i8* %gep2 to i64*
+ %fptr1 = bitcast i8* %gep1 to float*
+ %fptr2 = bitcast i8* %gep2 to float*
+ %fptr3 = bitcast i8* %gep3 to float*
+ store float 0.0, float* %fptr1
+ store float 1.0, float* %fptr2
+ %v = load i64* %iptr1
+ store i64 %v, i64* %iptr2
+ %f1 = load float* %fptr2
+ %f2 = load float* %fptr3
+ %ret = fadd float %f1, %f2
+ ret float %ret
+}
+
+define i32 @PR22093() {
+; Test that we don't try to pre-split a splittable store of a splittable but
+; not pre-splittable load over the same alloca. We "handle" this case when the
+; load is unsplittable but unrelated to this alloca by just generating extra
+; loads without touching the original, but when the original load was out of
+; this alloca we need to handle it specially to ensure the splits line up
+; properly for rewriting.
+;
+; CHECK-LABEL: @PR22093(
+; CHECK-NOT: alloca
+; CHECK: alloca i16
+; CHECK-NOT: alloca
+; CHECK: store volatile i16
+
+entry:
+ %a = alloca i32
+ %a.cast = bitcast i32* %a to i16*
+ store volatile i16 42, i16* %a.cast
+ %load = load i32* %a
+ store i32 %load, i32* %a
+ ret i32 %load
+}
+
+define void @PR22093.2() {
+; Another case where we end up unable to split a particular set of loads and
+; stores, and where ordering even matters. Here we have a load which is
+; pre-splittable by itself, and the first store is also compatible. But the
+; second store of the load makes the load unsplittable because of a mismatch of
+; splits. Because this makes the load unsplittable, we also have to go back and
+; remove the first store from the presplit candidates as its load won't be
+; presplit.
+;
+; CHECK-LABEL: @PR22093.2(
+; CHECK-NOT: alloca
+; CHECK: alloca i16
+; CHECK-NEXT: alloca i8
+; CHECK-NOT: alloca
+; CHECK: store volatile i16
+; CHECK: store volatile i8
+
+entry:
+ %a = alloca i64
+ %a.cast1 = bitcast i64* %a to i32*
+ %a.cast2 = bitcast i64* %a to i16*
+ store volatile i16 42, i16* %a.cast2
+ %load = load i32* %a.cast1
+ store i32 %load, i32* %a.cast1
+ %a.gep1 = getelementptr i32* %a.cast1, i32 1
+ %a.cast3 = bitcast i32* %a.gep1 to i8*
+ store volatile i8 13, i8* %a.cast3
+ store i32 %load, i32* %a.gep1
+ ret void
+}
diff --git a/test/Transforms/SROA/vector-promotion.ll b/test/Transforms/SROA/vector-promotion.ll
index 830a22a..c20c635 100644
--- a/test/Transforms/SROA/vector-promotion.ll
+++ b/test/Transforms/SROA/vector-promotion.ll
@@ -604,3 +604,22 @@ entry:
ret <2 x float> %result
; CHECK-NEXT: ret <2 x float> %[[V4]]
}
+
+define <4 x float> @test12() {
+; CHECK-LABEL: @test12(
+ %a = alloca <3 x i32>, align 16
+; CHECK-NOT: alloca
+
+ %cast1 = bitcast <3 x i32>* %a to <4 x i32>*
+ store <4 x i32> undef, <4 x i32>* %cast1, align 16
+; CHECK-NOT: store
+
+ %cast2 = bitcast <3 x i32>* %a to <3 x float>*
+ %cast3 = bitcast <3 x float>* %cast2 to <4 x float>*
+ %vec = load <4 x float>* %cast3
+; CHECK-NOT: load
+
+; CHECK: %[[ret:.*]] = bitcast <4 x i32> undef to <4 x float>
+; CHECK-NEXT: ret <4 x float> %[[ret]]
+ ret <4 x float> %vec
+}