diff options
author | Justin Holewinski <jholewinski@nvidia.com> | 2012-05-04 20:18:50 +0000 |
---|---|---|
committer | Justin Holewinski <jholewinski@nvidia.com> | 2012-05-04 20:18:50 +0000 |
commit | 49683f3c961379fbc088871a5d6304950f1f1cbc (patch) | |
tree | 830fa1ee9c992ef4645863d128be912ce2bfc987 /test/CodeGen/NVPTX/st-addrspace.ll | |
parent | 2c7e5c714c8675f757c4936a3a2132c2466a626c (diff) | |
download | external_llvm-49683f3c961379fbc088871a5d6304950f1f1cbc.zip external_llvm-49683f3c961379fbc088871a5d6304950f1f1cbc.tar.gz external_llvm-49683f3c961379fbc088871a5d6304950f1f1cbc.tar.bz2 |
This patch adds a new NVPTX back-end to LLVM which supports code generation for NVIDIA PTX 3.0. This back-end will (eventually) replace the current PTX back-end, while maintaining compatibility with it.
The new target machines are:
nvptx (old ptx32) => 32-bit PTX
nvptx64 (old ptx64) => 64-bit PTX
The sources are based on the internal NVIDIA NVPTX back-end, and
contain more functionality than the current PTX back-end
provides.
NV_CONTRIB
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@156196 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'test/CodeGen/NVPTX/st-addrspace.ll')
-rw-r--r-- | test/CodeGen/NVPTX/st-addrspace.ll | 179 |
1 file changed, 179 insertions, 0 deletions
diff --git a/test/CodeGen/NVPTX/st-addrspace.ll b/test/CodeGen/NVPTX/st-addrspace.ll new file mode 100644 index 0000000..54e04ae --- /dev/null +++ b/test/CodeGen/NVPTX/st-addrspace.ll @@ -0,0 +1,179 @@ +; RUN: llc < %s -march=nvptx -mcpu=sm_10 | FileCheck %s --check-prefix=PTX32 +; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX32 +; RUN: llc < %s -march=nvptx64 -mcpu=sm_10 | FileCheck %s --check-prefix=PTX64 +; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX64 + + +;; i8 + +define void @st_global_i8(i8 addrspace(1)* %ptr, i8 %a) { +; PTX32: st.global.u8 [%r{{[0-9]+}}], %rc{{[0-9]+}} +; PTX32: ret +; PTX64: st.global.u8 [%rl{{[0-9]+}}], %rc{{[0-9]+}} +; PTX64: ret + store i8 %a, i8 addrspace(1)* %ptr + ret void +} + +define void @st_shared_i8(i8 addrspace(3)* %ptr, i8 %a) { +; PTX32: st.shared.u8 [%r{{[0-9]+}}], %rc{{[0-9]+}} +; PTX32: ret +; PTX64: st.shared.u8 [%rl{{[0-9]+}}], %rc{{[0-9]+}} +; PTX64: ret + store i8 %a, i8 addrspace(3)* %ptr + ret void +} + +define void @st_local_i8(i8 addrspace(5)* %ptr, i8 %a) { +; PTX32: st.local.u8 [%r{{[0-9]+}}], %rc{{[0-9]+}} +; PTX32: ret +; PTX64: st.local.u8 [%rl{{[0-9]+}}], %rc{{[0-9]+}} +; PTX64: ret + store i8 %a, i8 addrspace(5)* %ptr + ret void +} + +;; i16 + +define void @st_global_i16(i16 addrspace(1)* %ptr, i16 %a) { +; PTX32: st.global.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}} +; PTX32: ret +; PTX64: st.global.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}} +; PTX64: ret + store i16 %a, i16 addrspace(1)* %ptr + ret void +} + +define void @st_shared_i16(i16 addrspace(3)* %ptr, i16 %a) { +; PTX32: st.shared.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}} +; PTX32: ret +; PTX64: st.shared.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}} +; PTX64: ret + store i16 %a, i16 addrspace(3)* %ptr + ret void +} + +define void @st_local_i16(i16 addrspace(5)* %ptr, i16 %a) { +; PTX32: st.local.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}} +; PTX32: ret +; PTX64: st.local.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}} +; PTX64: ret + store i16 %a, 
i16 addrspace(5)* %ptr + ret void +} + +;; i32 + +define void @st_global_i32(i32 addrspace(1)* %ptr, i32 %a) { +; PTX32: st.global.u32 [%r{{[0-9]+}}], %r{{[0-9]+}} +; PTX32: ret +; PTX64: st.global.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}} +; PTX64: ret + store i32 %a, i32 addrspace(1)* %ptr + ret void +} + +define void @st_shared_i32(i32 addrspace(3)* %ptr, i32 %a) { +; PTX32: st.shared.u32 [%r{{[0-9]+}}], %r{{[0-9]+}} +; PTX32: ret +; PTX64: st.shared.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}} +; PTX64: ret + store i32 %a, i32 addrspace(3)* %ptr + ret void +} + +define void @st_local_i32(i32 addrspace(5)* %ptr, i32 %a) { +; PTX32: st.local.u32 [%r{{[0-9]+}}], %r{{[0-9]+}} +; PTX32: ret +; PTX64: st.local.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}} +; PTX64: ret + store i32 %a, i32 addrspace(5)* %ptr + ret void +} + +;; i64 + +define void @st_global_i64(i64 addrspace(1)* %ptr, i64 %a) { +; PTX32: st.global.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}} +; PTX32: ret +; PTX64: st.global.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}} +; PTX64: ret + store i64 %a, i64 addrspace(1)* %ptr + ret void +} + +define void @st_shared_i64(i64 addrspace(3)* %ptr, i64 %a) { +; PTX32: st.shared.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}} +; PTX32: ret +; PTX64: st.shared.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}} +; PTX64: ret + store i64 %a, i64 addrspace(3)* %ptr + ret void +} + +define void @st_local_i64(i64 addrspace(5)* %ptr, i64 %a) { +; PTX32: st.local.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}} +; PTX32: ret +; PTX64: st.local.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}} +; PTX64: ret + store i64 %a, i64 addrspace(5)* %ptr + ret void +} + +;; f32 + +define void @st_global_f32(float addrspace(1)* %ptr, float %a) { +; PTX32: st.global.f32 [%r{{[0-9]+}}], %f{{[0-9]+}} +; PTX32: ret +; PTX64: st.global.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}} +; PTX64: ret + store float %a, float addrspace(1)* %ptr + ret void +} + +define void @st_shared_f32(float addrspace(3)* %ptr, float %a) { +; PTX32: st.shared.f32 [%r{{[0-9]+}}], %f{{[0-9]+}} +; PTX32: ret +; PTX64: st.shared.f32 
[%rl{{[0-9]+}}], %f{{[0-9]+}} +; PTX64: ret + store float %a, float addrspace(3)* %ptr + ret void +} + +define void @st_local_f32(float addrspace(5)* %ptr, float %a) { +; PTX32: st.local.f32 [%r{{[0-9]+}}], %f{{[0-9]+}} +; PTX32: ret +; PTX64: st.local.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}} +; PTX64: ret + store float %a, float addrspace(5)* %ptr + ret void +} + +;; f64 + +define void @st_global_f64(double addrspace(1)* %ptr, double %a) { +; PTX32: st.global.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}} +; PTX32: ret +; PTX64: st.global.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}} +; PTX64: ret + store double %a, double addrspace(1)* %ptr + ret void +} + +define void @st_shared_f64(double addrspace(3)* %ptr, double %a) { +; PTX32: st.shared.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}} +; PTX32: ret +; PTX64: st.shared.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}} +; PTX64: ret + store double %a, double addrspace(3)* %ptr + ret void +} + +define void @st_local_f64(double addrspace(5)* %ptr, double %a) { +; PTX32: st.local.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}} +; PTX32: ret +; PTX64: st.local.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}} +; PTX64: ret + store double %a, double addrspace(5)* %ptr + ret void +} |