diff options
Diffstat (limited to 'test/CodeGen/X86/sse-scalar-fp-arith.ll')
-rw-r--r-- | test/CodeGen/X86/sse-scalar-fp-arith.ll | 840 |
1 file changed, 700 insertions, 140 deletions
diff --git a/test/CodeGen/X86/sse-scalar-fp-arith.ll b/test/CodeGen/X86/sse-scalar-fp-arith.ll index 3949a83..b122ef6 100644 --- a/test/CodeGen/X86/sse-scalar-fp-arith.ll +++ b/test/CodeGen/X86/sse-scalar-fp-arith.ll @@ -1,13 +1,23 @@ -; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s -; RUN: llc -mtriple=x86_64-pc-linux -mattr=-sse4.1 -mcpu=corei7 < %s | FileCheck -check-prefix=CHECK -check-prefix=SSE2 %s -; RUN: llc -mtriple=x86_64-pc-linux -mcpu=corei7-avx < %s | FileCheck -check-prefix=CHECK -check-prefix=AVX %s +; RUN: llc -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck --check-prefix=SSE --check-prefix=SSE2 %s +; RUN: llc -mcpu=x86-64 -mattr=+sse4.1 < %s | FileCheck --check-prefix=SSE --check-prefix=SSE41 %s +; RUN: llc -mcpu=x86-64 -mattr=+avx < %s | FileCheck --check-prefix=AVX %s + +target triple = "x86_64-unknown-unknown" ; Ensure that the backend no longer emits unnecessary vector insert ; instructions immediately after SSE scalar fp instructions ; like addss or mulss. 
- define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %add = fadd float %2, %1 @@ -15,14 +25,16 @@ define <4 x float> @test_add_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_add_ss -; SSE2: addss %xmm1, %xmm0 -; AVX: vaddss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %sub = fsub float %2, %1 @@ -30,13 +42,16 @@ define <4 x float> @test_sub_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_sub_ss -; SSE2: subss %xmm1, %xmm0 -; AVX: vsubss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %mul = fmul float %2, %1 @@ -44,14 +59,16 @@ define <4 x float> @test_mul_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_mul_ss -; SSE2: mulss %xmm1, %xmm0 -; AVX: vmulss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: 
test_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %div = fdiv float %2, %1 @@ -59,14 +76,16 @@ define <4 x float> @test_div_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_div_ss -; SSE2: divss %xmm1, %xmm0 -; AVX: vdivss %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %b, i32 0 %2 = extractelement <2 x double> %a, i32 0 %add = fadd double %2, %1 @@ -74,14 +93,16 @@ define <2 x double> @test_add_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test_add_sd -; SSE2: addsd %xmm1, %xmm0 -; AVX: vaddsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %b, i32 0 %2 = extractelement <2 x double> %a, i32 0 %sub = fsub double %2, %1 @@ -89,14 +110,16 @@ define <2 x double> @test_sub_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test_sub_sd -; SSE2: subsd %xmm1, %xmm0 -; AVX: vsubsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %b, i32 0 %2 = extractelement 
<2 x double> %a, i32 0 %mul = fmul double %2, %1 @@ -104,14 +127,16 @@ define <2 x double> @test_mul_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test_mul_sd -; SSE2: mulsd %xmm1, %xmm0 -; AVX: vmulsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %b, i32 0 %2 = extractelement <2 x double> %a, i32 0 %div = fdiv double %2, %1 @@ -119,14 +144,17 @@ define <2 x double> @test_div_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test_div_sd -; SSE2: divsd %xmm1, %xmm0 -; AVX: vdivsd %xmm1, %xmm0, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <4 x float> @test2_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test2_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %a, i32 0 %2 = extractelement <4 x float> %b, i32 0 %add = fadd float %1, %2 @@ -134,14 +162,17 @@ define <4 x float> @test2_add_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test2_add_ss -; SSE2: addss %xmm0, %xmm1 -; AVX: vaddss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test2_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %a, i32 0 %2 = extractelement <4 x float> %b, i32 0 %sub = fsub float %2, %1 @@ -149,14 +180,17 @@ define <4 
x float> @test2_sub_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test2_sub_ss -; SSE2: subss %xmm0, %xmm1 -; AVX: vsubss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test2_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %a, i32 0 %2 = extractelement <4 x float> %b, i32 0 %mul = fmul float %1, %2 @@ -164,14 +198,17 @@ define <4 x float> @test2_mul_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test2_mul_ss -; SSE2: mulss %xmm0, %xmm1 -; AVX: vmulss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test2_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %a, i32 0 %2 = extractelement <4 x float> %b, i32 0 %div = fdiv float %2, %1 @@ -179,14 +216,17 @@ define <4 x float> @test2_div_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test2_div_ss -; SSE2: divss %xmm0, %xmm1 -; AVX: vdivss %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movss -; CHECK: ret - - define <2 x double> @test2_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test2_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %a, i32 0 %2 = extractelement <2 x double> %b, i32 0 %add = fadd double %1, %2 @@ -194,14 +234,17 @@ define <2 x double> @test2_add_sd(<2 x double> %a, <2 x 
double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test2_add_sd -; SSE2: addsd %xmm0, %xmm1 -; AVX: vaddsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test2_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %a, i32 0 %2 = extractelement <2 x double> %b, i32 0 %sub = fsub double %2, %1 @@ -209,14 +252,17 @@ define <2 x double> @test2_sub_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test2_sub_sd -; SSE2: subsd %xmm0, %xmm1 -; AVX: vsubsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test2_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %a, i32 0 %2 = extractelement <2 x double> %b, i32 0 %mul = fmul double %1, %2 @@ -224,14 +270,17 @@ define <2 x double> @test2_mul_sd(<2 x double> %a, <2 x double> %b) { ret <2 x double> %3 } -; CHECK-LABEL: test2_mul_sd -; SSE2: mulsd %xmm0, %xmm1 -; AVX: vmulsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: test2_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test2_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq %1 = extractelement <2 x double> %a, i32 0 %2 = extractelement <2 x double> %b, i32 0 %div = fdiv double %2, %1 @@ -239,14 +288,18 @@ define <2 x double> @test2_div_sd(<2 x double> %a, <2 x double> %b) { ret <2 x 
double> %3 } -; CHECK-LABEL: test2_div_sd -; SSE2: divsd %xmm0, %xmm1 -; AVX: vdivsd %xmm0, %xmm1, %xmm0 -; CHECK-NOT: movsd -; CHECK: ret - - define <4 x float> @test_multiple_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_multiple_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm0, %xmm1 +; SSE-NEXT: addss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_multiple_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %add = fadd float %2, %1 @@ -255,14 +308,19 @@ define <4 x float> @test_multiple_add_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_multiple_add_ss -; CHECK: addss -; CHECK: addss -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test_multiple_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_multiple_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: movaps %xmm0, %xmm2 +; SSE-NEXT: subss %xmm1, %xmm2 +; SSE-NEXT: subss %xmm2, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_multiple_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %sub = fsub float %2, %1 @@ -271,14 +329,18 @@ define <4 x float> @test_multiple_sub_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_multiple_sub_ss -; CHECK: subss -; CHECK: subss -; CHECK-NOT: movss -; CHECK: ret - - define <4 x float> @test_multiple_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_multiple_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm0, %xmm1 +; SSE-NEXT: mulss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_multiple_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x 
float> %a, i32 0 %mul = fmul float %2, %1 @@ -287,13 +349,19 @@ define <4 x float> @test_multiple_mul_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_multiple_mul_ss -; CHECK: mulss -; CHECK: mulss -; CHECK-NOT: movss -; CHECK: ret - define <4 x float> @test_multiple_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: test_multiple_div_ss: +; SSE: # BB#0: +; SSE-NEXT: movaps %xmm0, %xmm2 +; SSE-NEXT: divss %xmm1, %xmm2 +; SSE-NEXT: divss %xmm2, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: test_multiple_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm1 +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq %1 = extractelement <4 x float> %b, i32 0 %2 = extractelement <4 x float> %a, i32 0 %div = fdiv float %2, %1 @@ -302,9 +370,501 @@ define <4 x float> @test_multiple_div_ss(<4 x float> %a, <4 x float> %b) { ret <4 x float> %3 } -; CHECK-LABEL: test_multiple_div_ss -; CHECK: divss -; CHECK: divss -; CHECK-NOT: movss -; CHECK: ret +; Ensure that the backend selects SSE/AVX scalar fp instructions +; from a packed fp instruction plus a vector insert. 
+ +define <4 x float> @insert_test_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fadd <4 x float> %a, %b + %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + ret <4 x float> %2 +} + +define <4 x float> @insert_test_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fsub <4 x float> %a, %b + %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + ret <4 x float> %2 +} + +define <4 x float> @insert_test_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fmul <4 x float> %a, %b + %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + ret <4 x float> %2 +} + +define <4 x float> @insert_test_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <4 x float> %a, %b + %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + ret <4 x float> %2 +} + +define <2 x double> @insert_test_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm1, 
%xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fadd <2 x double> %a, %b + %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3> + ret <2 x double> %2 +} + +define <2 x double> @insert_test_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fsub <2 x double> %a, %b + %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3> + ret <2 x double> %2 +} + +define <2 x double> @insert_test_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fmul <2 x double> %a, %b + %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3> + ret <2 x double> %2 +} + +define <2 x double> @insert_test_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <2 x double> %a, %b + %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3> + ret <2 x double> %2 +} + +define <4 x float> @insert_test2_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test2_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fadd <4 x float> %b, %a + %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + ret <4 x float> %2 +} + +define <4 x float> @insert_test2_sub_ss(<4 x float> %a, <4 x float> %b) { +; 
SSE-LABEL: insert_test2_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fsub <4 x float> %b, %a + %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + ret <4 x float> %2 +} + +define <4 x float> @insert_test2_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test2_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fmul <4 x float> %b, %a + %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + ret <4 x float> %2 +} + +define <4 x float> @insert_test2_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test2_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <4 x float> %b, %a + %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7> + ret <4 x float> %2 +} + +define <2 x double> @insert_test2_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test2_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fadd <2 x double> %b, %a + %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3> + ret <2 x double> %2 +} + +define <2 x double> @insert_test2_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test2_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; 
SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fsub <2 x double> %b, %a + %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3> + ret <2 x double> %2 +} + +define <2 x double> @insert_test2_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test2_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fmul <2 x double> %b, %a + %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3> + ret <2 x double> %2 +} + +define <2 x double> @insert_test2_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test2_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test2_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <2 x double> %b, %a + %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3> + ret <2 x double> %2 +} + +define <4 x float> @insert_test3_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test3_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fadd <4 x float> %a, %b + %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test3_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test3_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fsub <4 x float> %a, %b + %2 = select <4 x i1> <i1 false, i1 true, i1 
true, i1 true>, <4 x float> %a, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test3_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test3_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fmul <4 x float> %a, %b + %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1 + ret <4 x float> %2 +} +define <4 x float> @insert_test3_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test3_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <4 x float> %a, %b + %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1 + ret <4 x float> %2 +} + +define <2 x double> @insert_test3_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test3_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fadd <2 x double> %a, %b + %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test3_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test3_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fsub <2 x double> %a, %b + %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test3_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test3_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: 
insert_test3_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fmul <2 x double> %a, %b + %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test3_div_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test3_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test3_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <2 x double> %a, %b + %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1 + ret <2 x double> %2 +} + +define <4 x float> @insert_test4_add_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test4_add_ss: +; SSE: # BB#0: +; SSE-NEXT: addss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_add_ss: +; AVX: # BB#0: +; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fadd <4 x float> %b, %a + %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test4_sub_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test4_sub_ss: +; SSE: # BB#0: +; SSE-NEXT: subss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_sub_ss: +; AVX: # BB#0: +; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fsub <4 x float> %b, %a + %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test4_mul_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test4_mul_ss: +; SSE: # BB#0: +; SSE-NEXT: mulss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_mul_ss: +; AVX: # BB#0: +; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fmul <4 x float> %b, %a + %2 = select <4 x i1> <i1 false, i1 true, i1 
true, i1 true>, <4 x float> %b, <4 x float> %1 + ret <4 x float> %2 +} + +define <4 x float> @insert_test4_div_ss(<4 x float> %a, <4 x float> %b) { +; SSE-LABEL: insert_test4_div_ss: +; SSE: # BB#0: +; SSE-NEXT: divss %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_div_ss: +; AVX: # BB#0: +; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <4 x float> %b, %a + %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1 + ret <4 x float> %2 +} + +define <2 x double> @insert_test4_add_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test4_add_sd: +; SSE: # BB#0: +; SSE-NEXT: addsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_add_sd: +; AVX: # BB#0: +; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fadd <2 x double> %b, %a + %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test4_sub_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test4_sub_sd: +; SSE: # BB#0: +; SSE-NEXT: subsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_sub_sd: +; AVX: # BB#0: +; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fsub <2 x double> %b, %a + %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test4_mul_sd(<2 x double> %a, <2 x double> %b) { +; SSE-LABEL: insert_test4_mul_sd: +; SSE: # BB#0: +; SSE-NEXT: mulsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_mul_sd: +; AVX: # BB#0: +; AVX-NEXT: vmulsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fmul <2 x double> %b, %a + %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1 + ret <2 x double> %2 +} + +define <2 x double> @insert_test4_div_sd(<2 x double> %a, <2 x double> %b) 
{ +; SSE-LABEL: insert_test4_div_sd: +; SSE: # BB#0: +; SSE-NEXT: divsd %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm1, %xmm0 +; SSE-NEXT: retq +; +; AVX-LABEL: insert_test4_div_sd: +; AVX: # BB#0: +; AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0 +; AVX-NEXT: retq + %1 = fdiv <2 x double> %b, %a + %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1 + ret <2 x double> %2 +} |