// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test device global memory data sharing codegen.
///==========================================================================///

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK

// expected-no-diagnostics

#ifndef HEADER
#define HEADER

void test_ds(){
  #pragma omp target
  {
    int a = 10;
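    // 'a' is captured by both parallel regions below (written in the first,
    // read in the second), so it escapes the target region's initial thread
    // and is globalized with __kmpc_alloc_shared in the checks below.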
    #pragma omp parallel
    {
      a = 1000;
    }
    int b = 100;
    int c = 1000;
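    // 'b' and 'a' are shared with the next parallel region and must likewise
    // be globalized; 'c' is private there, so only the private copy's address
    // is taken and the original 'c' stays in an ordinary stack alloca.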
    #pragma omp parallel private(c)
    {
      int *c1 = &c;
      b = a + 10000;
    }
  }
}
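
// The device IR checked below allocates 'a' and 'b' with __kmpc_alloc_shared
// and releases them with __kmpc_free_shared, while 'c' remains a plain alloca.
// Each parallel region is launched via __kmpc_parallel_51, and the generated
// wrapper functions recover the captured variables through
// __kmpc_get_shared_variables before calling the outlined bodies.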

#endif

// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7test_dsv_l14
// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS1:%.*]] = alloca [2 x i8*], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 1, i1 true, i1 true)
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK: user_code.entry:
// CHECK-NEXT: [[A:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 4)
// CHECK-NEXT: [[A_ON_STACK:%.*]] = bitcast i8* [[A]] to i32*
// CHECK-NEXT: [[B:%.*]] = call align 8 i8* @__kmpc_alloc_shared(i64 4)
// CHECK-NEXT: [[B_ON_STACK:%.*]] = bitcast i8* [[B]] to i32*
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: store i32 10, i32* [[A_ON_STACK]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i32* [[A_ON_STACK]] to i8*
// CHECK-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP4]], i64 1)
// CHECK-NEXT: store i32 100, i32* [[B_ON_STACK]], align 4
// CHECK-NEXT: store i32 1000, i32* [[C]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS1]], i64 0, i64 0
// CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[B_ON_STACK]] to i8*
// CHECK-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS1]], i64 0, i64 1
// CHECK-NEXT: [[TMP8:%.*]] = bitcast i32* [[A_ON_STACK]] to i8*
// CHECK-NEXT: store i8* [[TMP8]], i8** [[TMP7]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS1]] to i8**
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, i32*, i32*)* @__omp_outlined__1 to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined__1_wrapper to i8*), i8** [[TMP9]], i64 2)
// CHECK-NEXT: call void @__kmpc_free_shared(i8* [[B]], i64 4)
// CHECK-NEXT: call void @__kmpc_free_shared(i8* [[A]], i64 4)
// CHECK-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 1, i1 true)
// CHECK-NEXT: ret void
// CHECK: worker.exit:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: store i32 1000, i32* [[TMP0]], align 4
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined___wrapper
// CHECK-SAME: (i16 noundef zeroext [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]]) #[[ATTR4:[0-9]+]]
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__1
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[B:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[C:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[C1:%.*]] = alloca i32*, align 8
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[B]], i32** [[B_ADDR]], align 8
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8
// CHECK-NEXT: store i32* [[C]], i32** [[C1]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], 10000
// CHECK-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
// CHECK-SAME: (i16 noundef zeroext [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8
// CHECK-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2
// CHECK-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4
// CHECK-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]])
// CHECK-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0
// CHECK-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32**
// CHECK-NEXT: [[TMP5:%.*]] = load i32*, i32** [[TMP4]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1
// CHECK-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32**
// CHECK-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8
// CHECK-NEXT: call void @__omp_outlined__1(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], i32* [[TMP5]], i32* [[TMP8]]) #[[ATTR4]]
// CHECK-NEXT: ret void
//