// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c++ -emit-llvm %s -o - -DUNTIEDRT | FileCheck %s --check-prefix CHECK --check-prefix UNTIEDRT
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s -DUNTIEDRT
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK --check-prefix UNTIEDRT
//
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-enable-irbuilder -x c++ -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-enable-irbuilder -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s

// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -x c++ -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

enum omp_allocator_handle_t {
  omp_null_allocator = 0,
  omp_default_mem_alloc = 1,
  omp_large_cap_mem_alloc = 2,
  omp_const_mem_alloc = 3,
  omp_high_bw_mem_alloc = 4,
  omp_low_lat_mem_alloc = 5,
  omp_cgroup_mem_alloc = 6,
  omp_pteam_mem_alloc = 7,
  omp_thread_mem_alloc = 8,
  KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__
};

// CHECK-DAG: [[IDENT_T:%.+]] = type { i32, i32, i32, i32, i8* }
// CHECK-DAG: [[STRUCT_SHAREDS:%.+]] = type { i8*, [2 x [[STRUCT_S:%.+]]]* }
// CHECK-DAG: [[STRUCT_SHAREDS1:%.+]] = type { [2 x [[STRUCT_S:%.+]]]* }
// CHECK-DAG: [[KMP_TASK_T:%.+]] = type { i8*, i32 (i32, i8*)*, i32, %union{{.+}}, %union{{.+}} }
// CHECK-DAG: [[KMP_DEPEND_INFO:%.+]] = type { i64, i64, i8 }
struct S {
  int a;
  S() : a(0) {}
  S(const S &s) : a(s.a) {}
  ~S() {}
};
int a;
// CHECK-LABEL: @main
int main() {
// CHECK: [[B:%.+]] = alloca i8
// CHECK: [[S:%.+]] = alloca [2 x [[STRUCT_S]]]
  char b;
  S s[2];
  int arr[10][a];
// CHECK: [[B_REF:%.+]] = getelementptr inbounds [[STRUCT_SHAREDS]], [[STRUCT_SHAREDS]]* [[CAPTURES:%.+]], i32 0, i32 0
// CHECK: store i8* [[B]], i8** [[B_REF]]
// CHECK: [[S_REF:%.+]] = getelementptr inbounds [[STRUCT_SHAREDS]], [[STRUCT_SHAREDS]]* [[CAPTURES]], i32 0, i32 1
// CHECK: store [2 x [[STRUCT_S]]]* [[S]], [2 x [[STRUCT_S]]]** [[S_REF]]
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i32 33, i64 40, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_T]]{{.*}}*)* [[TASK_ENTRY1:@.+]] to i32 (i32, i8*)*))
// CHECK: [[SHAREDS_REF_PTR:%.+]] = getelementptr inbounds [[KMP_TASK_T]], [[KMP_TASK_T]]* [[TASK_PTR:%.+]], i32 0, i32 0
// CHECK: [[SHAREDS_REF:%.+]] = load i8*, i8** [[SHAREDS_REF_PTR]]
// CHECK: [[BITCAST:%.+]] = bitcast [[STRUCT_SHAREDS]]* [[CAPTURES]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[SHAREDS_REF]], i8* align 8 [[BITCAST]], i64 16, i1 false)
// CHECK: [[PRIORITY_REF_PTR:%.+]] = getelementptr inbounds [[KMP_TASK_T]], [[KMP_TASK_T]]* [[TASK_PTR]], i32 0, i32 4
// CHECK: [[PRIORITY:%.+]] = bitcast %union{{.+}}* [[PRIORITY_REF_PTR]] to i32*
// CHECK: store i32 {{.+}}, i32* [[PRIORITY]]
// CHECK: call i32 @__kmpc_omp_task([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]])
#pragma omp task shared(a, b, s) priority(b)
  {
    a = 15;
    b = a;
    s[0].a = 10;
  }
// CHECK: [[S_REF:%.+]] = getelementptr inbounds [[STRUCT_SHAREDS1]], [[STRUCT_SHAREDS1]]* [[CAPTURES:%.+]], i32 0, i32 0
// CHECK: store [2 x [[STRUCT_S]]]* [[S]], [2 x [[STRUCT_S]]]** [[S_REF]]
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{[^,]+}}, i32 {{%.*}}, i32 1, i64 40, i64 8,
// CHECK: [[SHAREDS_REF_PTR:%.+]] = getelementptr inbounds [[KMP_TASK_T]], [[KMP_TASK_T]]* [[TASK_PTR:%.+]], i32 0, i32 0
// CHECK: [[SHAREDS_REF:%.+]] = load i8*, i8** [[SHAREDS_REF_PTR]]
// CHECK: [[BITCAST:%.+]] = bitcast [[STRUCT_SHAREDS1]]* [[CAPTURES]] to i8*
// CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[SHAREDS_REF]], i8* align 8 [[BITCAST]], i64 8, i1 false)
// CHECK: [[DEP_BASE:%.*]] = getelementptr inbounds [4 x [[KMP_DEPEND_INFO]]], [4 x [[KMP_DEPEND_INFO]]]* [[DEPENDENCIES:%.*]], i64 0, i64 0
// CHECK: [[DEP:%.+]] = getelementptr [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP_BASE]], i64 0
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 0
// CHECK: store i64 ptrtoint (i32* @{{.+}} to i64), i64* [[T0]]
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 1
// CHECK: store i64 4, i64* [[T0]]
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 2
// CHECK: store i8 1, i8* [[T0]]
// CHECK: [[DEP:%.*]] = getelementptr [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP_BASE]], i64 1
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 0
// CHECK: ptrtoint i8* [[B]] to i64
// CHECK: store i64 %{{[^,]+}}, i64* [[T0]]
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 1
// CHECK: store i64 1, i64* [[T0]]
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 2
// CHECK: store i8 1, i8* [[T0]]
// CHECK: [[DEP:%.*]] = getelementptr [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP_BASE]], i64 2
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 0
// CHECK: ptrtoint [2 x [[STRUCT_S]]]* [[S]] to i64
// CHECK: store i64 %{{[^,]+}}, i64* [[T0]]
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 1
// CHECK: store i64 8, i64* [[T0]]
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 2
// CHECK: store i8 1, i8* [[T0]]
// CHECK: [[IDX1:%.+]] = mul nsw i64 0, [[A_VAL:%.+]]
// CHECK: [[START:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]]
// CHECK: [[IDX1:%.+]] = mul nsw i64 9, [[A_VAL]]
// CHECK: [[END:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]]
// CHECK: [[END1:%.+]] = getelementptr i32, i32* [[END]], i32 1
// CHECK: [[START_INT:%.+]] = ptrtoint i32* [[START]] to i64
// CHECK: [[END_INT:%.+]] = ptrtoint i32* [[END1]] to i64
// CHECK: [[SIZEOF:%.+]] = sub nuw i64 [[END_INT]], [[START_INT]]
// CHECK: [[DEP:%.*]] = getelementptr [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP_BASE]], i64 3
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* [[DEP]], i32 0, i32 0
// CHECK: [[T1:%.*]] = ptrtoint i32* [[START]] to i64
// CHECK: store i64 [[T1]], i64* [[T0]]
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
// CHECK: store i64 [[SIZEOF]], i64* [[T0]]
// CHECK: [[T0:%.*]] = getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
// CHECK: store i8 1, i8* [[T0]]
// CHECK: bitcast [[KMP_DEPEND_INFO]]* [[DEP_BASE]] to i8*
// CHECK: call i32 @__kmpc_omp_task_with_deps([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]], i32 4, i8* %{{[^,]+}}, i32 0, i8* null)
#pragma omp task shared(a, s) depend(in : a, b, s, arr[:])
  {
    a = 15;
    s[1].a = 10;
  }
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i32 0, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_T]]{{.*}}*)* [[TASK_ENTRY2:@.+]] to i32 (i32, i8*)*))
// CHECK: call i32 @__kmpc_omp_task([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]])
#pragma omp task untied
  {
#pragma omp critical
    a = 1;
  }
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i32 0, i64 40, i64 1,
// CHECK: getelementptr inbounds [2 x [[STRUCT_S]]], [2 x [[STRUCT_S]]]* [[S]], i64 0, i64 0
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint [[STRUCT_S]]* %{{.+}} to i64
// CHECK: store i64 %{{[^,]+}}, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
// CHECK: store i64 4, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
// CHECK: store i8 3, i8*
// CHECK: [[B_VAL:%.+]] = load i8, i8* [[B]]
// CHECK: [[IDX2:%.+]] = sext i8 [[B_VAL]] to i64
// CHECK: [[IDX1:%.+]] = mul nsw i64 4, [[A_VAL]]
// CHECK: [[START:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]]
// CHECK: [[START1:%.+]] = getelementptr inbounds i32, i32* [[START]], i64 [[IDX2]]
// CHECK: [[B_VAL:%.+]] = load i8, i8* [[B]]
// CHECK: [[IDX2:%.+]] = sext i8 [[B_VAL]] to i64
// CHECK: [[IDX1:%.+]] = mul nsw i64 9, [[A_VAL]]
// CHECK: [[END:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]]
// CHECK: [[END1:%.+]] = getelementptr inbounds i32, i32* [[END]], i64 [[IDX2]]
// CHECK: [[END2:%.+]] = getelementptr i32, i32* [[END1]], i32 1
// CHECK: [[START_INT:%.+]] = ptrtoint i32* [[START1]] to i64
// CHECK: [[END_INT:%.+]] = ptrtoint i32* [[END2]] to i64
// CHECK: [[SIZEOF:%.+]] = sub nuw i64 [[END_INT]], [[START_INT]]
// CHECK: getelementptr [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i64 1
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint i32* [[START1]] to i64
// CHECK: store i64 %{{[^,]+}}, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
// CHECK: store i64 [[SIZEOF]], i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
// CHECK: store i8 3, i8*
// CHECK: bitcast [[KMP_DEPEND_INFO]]* %{{.+}} to i8*
// CHECK: call i32 @__kmpc_omp_task_with_deps([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]], i32 2, i8* %{{[^,]+}}, i32 0, i8* null)
#pragma omp task untied depend(out : s[0], arr[4:][b])
  {
    a = 1;
  }
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i32 0, i64 40, i64 1,
// CHECK: getelementptr inbounds [2 x [[STRUCT_S]]], [2 x [[STRUCT_S]]]* [[S]], i64 0, i64 0
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint [[STRUCT_S]]* %{{.+}} to i64
// CHECK: store i64 %{{[^,]+}}, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
// CHECK: store i64 4, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
// CHECK: store i8 4, i8*
// CHECK: [[B_VAL:%.+]] = load i8, i8* [[B]]
// CHECK: [[IDX2:%.+]] = sext i8 [[B_VAL]] to i64
// CHECK: [[IDX1:%.+]] = mul nsw i64 4, [[A_VAL]]
// CHECK: [[START:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]]
// CHECK: [[START1:%.+]] = getelementptr inbounds i32, i32* [[START]], i64 [[IDX2]]
// CHECK: [[B_VAL:%.+]] = load i8, i8* [[B]]
// CHECK: [[IDX2:%.+]] = sext i8 [[B_VAL]] to i64
// CHECK: [[IDX1:%.+]] = mul nsw i64 9, [[A_VAL]]
// CHECK: [[END:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]]
// CHECK: [[END1:%.+]] = getelementptr inbounds i32, i32* [[END]], i64 [[IDX2]]
// CHECK: [[END2:%.+]] = getelementptr i32, i32* [[END1]], i32 1
// CHECK: [[START_INT:%.+]] = ptrtoint i32* [[START1]] to i64
// CHECK: [[END_INT:%.+]] = ptrtoint i32* [[END2]] to i64
// CHECK: [[SIZEOF:%.+]] = sub nuw i64 [[END_INT]], [[START_INT]]
// CHECK: getelementptr [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i64 1
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint i32* [[START1]] to i64
// CHECK: store i64 %{{[^,]+}}, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
// CHECK: store i64 [[SIZEOF]], i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
// CHECK: store i8 4, i8*
// CHECK: bitcast [[KMP_DEPEND_INFO]]* %{{.+}} to i8*
// CHECK: call i32 @__kmpc_omp_task_with_deps([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]], i32 2, i8* %{{[^,]+}}, i32 0, i8* null)
#pragma omp task untied depend(mutexinoutset: s[0], arr[4:][b])
  {
    a = 1;
  }
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i32 3, i64 40, i64 1,
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: store i64 ptrtoint (i32* @{{.+}} to i64), i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
// CHECK: store i64 4, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
// CHECK: store i8 3, i8*
// CHECK: getelementptr inbounds [2 x [[STRUCT_S]]], [2 x [[STRUCT_S]]]* [[S]], i64 0, i64 1
// CHECK: getelementptr [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i64 1
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint [[STRUCT_S]]* %{{.+}} to i64
// CHECK: store i64 %{{[^,]+}}, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
// CHECK: store i64 4, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
// CHECK: store i8 3, i8*
// CHECK: [[IDX1:%.+]] = mul nsw i64 0, [[A_VAL]]
// CHECK: [[START:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]]
// CHECK: [[START1:%.+]] = getelementptr inbounds i32, i32* [[START]], i64 3
// CHECK: [[NEW_A_VAL:%.+]] = load i32, i32* @{{.+}},
// CHECK: [[NEW_A_VAL_I64:%.+]] = sext i32 [[NEW_A_VAL]] to i64
// CHECK: [[IDX2:%.+]] = sub nsw i64 [[NEW_A_VAL_I64]], 1
// CHECK: [[NEW_A_VAL:%.+]] = load i32, i32* @{{.+}},
// CHECK: [[NEW_A_VAL_I64:%.+]] = sext i32 [[NEW_A_VAL]] to i64
// CHECK: [[SUB:%.+]] = add nsw i64 -1, [[NEW_A_VAL_I64]]
// CHECK: [[IDX1:%.+]] = mul nsw i64 [[SUB]], [[A_VAL]]
// CHECK: [[END:%.+]] = getelementptr inbounds i32, i32* %{{.+}}, i64 [[IDX1]]
// CHECK: [[END1:%.+]] = getelementptr inbounds i32, i32* [[END]], i64 [[IDX2]]
// CHECK: [[END2:%.+]] = getelementptr i32, i32* [[END1]], i32 1
// CHECK: [[START_INT:%.+]] = ptrtoint i32* [[START1]] to i64
// CHECK: [[END_INT:%.+]] = ptrtoint i32* [[END2]] to i64
// CHECK: [[SIZEOF:%.+]] = sub nuw i64 [[END_INT]], [[START_INT]]
// CHECK: getelementptr [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i64 2
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 0
// CHECK: ptrtoint i32* [[START1]] to i64
// CHECK: store i64 %{{[^,]+}}, i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 1
// CHECK: store i64 [[SIZEOF]], i64*
// CHECK: getelementptr inbounds [[KMP_DEPEND_INFO]], [[KMP_DEPEND_INFO]]* %{{[^,]+}}, i32 0, i32 2
// CHECK: store i8 3, i8*
// CHECK: bitcast [[KMP_DEPEND_INFO]]* %{{.+}} to i8*
// CHECK: call i32 @__kmpc_omp_task_with_deps([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]], i32 3, i8* %{{[^,]+}}, i32 0, i8* null)
#pragma omp task final(true) depend(inout: a, s[1], arr[:a][3:])
  {
    a = 2;
  }
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i32 3, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_T]]{{.*}}*)* [[TASK_ENTRY3:@.+]] to i32 (i32, i8*)*))
// CHECK: call i32 @__kmpc_omp_task([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]])
#pragma omp task final(true)
  {
    a = 2;
  }
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_T]]{{.*}}*)* [[TASK_ENTRY4:@.+]] to i32 (i32, i8*)*))
// CHECK: call i32 @__kmpc_omp_task([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]])
  const bool flag = false;
#pragma omp task final(flag)
  {
    a = 3;
  }
// CHECK: [[B_VAL:%.+]] = load i8, i8* [[B]]
// CHECK: [[CMP:%.+]] = icmp ne i8 [[B_VAL]], 0
// CHECK: [[FINAL:%.+]] = select i1 [[CMP]], i32 2, i32 0
// CHECK: [[FLAGS:%.+]] = or i32 [[FINAL]], 1
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i32 [[FLAGS]], i64 40, i64 8, i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_T]]{{.*}}*)* [[TASK_ENTRY5:@.+]] to i32 (i32, i8*)*))
// CHECK: call i32 @__kmpc_omp_task([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]])
  int c __attribute__((aligned(128)));
#pragma omp task final(b) shared(c)
  {
    a = 4;
    c = 5;
  }
// CHECK: [[ORIG_TASK_PTR:%.+]] = call i8* @__kmpc_omp_task_alloc([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i32 0, i64 256, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, [[KMP_TASK_T]]{{.*}}*)* [[TASK_ENTRY6:@.+]] to i32 (i32, i8*)*))
// CHECK: call i32 @__kmpc_omp_task([[IDENT_T]]* @{{.+}}, i32 {{%.*}}, i8* [[ORIG_TASK_PTR]])
#pragma omp task untied firstprivate(c) allocate(omp_pteam_mem_alloc:c)
  {
    S s1, s2;
#ifdef UNTIEDRT
#pragma omp allocate(s2) allocator(omp_pteam_mem_alloc)
#endif
    s2.a = 0;
#pragma omp task
    a = c = 4;
#pragma omp taskyield
    s1 = S();
    s2.a = 10;
#pragma omp taskwait
  }
  return a;
}
// CHECK: define internal i32 [[TASK_ENTRY1]](i32 %0, [[KMP_TASK_T]]{{.*}}* noalias %1)
// CHECK: store i32 15, i32* [[A_PTR:@.+]],
// CHECK: [[A_VAL:%.+]] = load i32, i32* [[A_PTR]]
// CHECK: [[A_VAL_I8:%.+]] = trunc i32 [[A_VAL]] to i8
// CHECK: store i8 [[A_VAL_I8]], i8* %{{.+}}
// CHECK: store i32 10, i32* %{{.+}}

// CHECK: define internal i32 [[TASK_ENTRY2]](i32 %0, [[KMP_TASK_T]]{{.*}}* noalias %1)
// CHECK: store i32 1, i32* [[A_PTR]]

// CHECK: define internal i32 [[TASK_ENTRY3]](i32 %0, [[KMP_TASK_T]]{{.*}}* noalias %1)
// CHECK: store i32 2, i32* [[A_PTR]]

// CHECK: define internal i32 [[TASK_ENTRY4]](i32 %0, [[KMP_TASK_T]]{{.*}}* noalias %1)
// CHECK: store i32 3, i32* [[A_PTR]]

// CHECK: define internal i32 [[TASK_ENTRY5]](i32 %0, [[KMP_TASK_T]]{{.*}}* noalias %1)
// CHECK: store i32 4, i32* [[A_PTR]]
// CHECK: store i32 5, i32* [[C_PTR:%.+]], align 128

// CHECK: define internal i32
// CHECK: store i32 4, i32* [[A_PTR]]

// CHECK: define internal i32 [[TASK_ENTRY6]](i32 %0, [[KMP_TASK_T]]{{.*}}* noalias %{{.+}})
// UNTIEDRT: [[S1_ADDR_PTR:%.+]] = alloca %struct.S*,
// UNTIEDRT: [[S2_ADDR_PTR_REF:%.+]] = alloca %struct.S**,
// UNTIEDRT: call void (i8*, ...) %{{.+}}(i8* %{{.+}}, %struct.S** [[S1_ADDR_PTR]], %struct.S*** [[S2_ADDR_PTR_REF]])
// UNTIEDRT-DAG: [[S1_ADDR:%.+]] = load %struct.S*, %struct.S** [[S1_ADDR_PTR]],
// UNTIEDRT-DAG: [[S2_ADDR_PTR:%.+]] = load %struct.S**, %struct.S*** [[S2_ADDR_PTR_REF]],
// UNTIEDRT-DAG: [[S2_ADDR:%.+]] = load %struct.S*, %struct.S** [[S2_ADDR_PTR]],
// CHECK: switch i32 %{{.+}}, label %[[DONE:.+]] [

// CHECK: [[DONE]]:
// CHECK: br label %[[CLEANUP:[^,]+]]

// CHECK: load i32*, i32** %
// CHECK: store i32 1, i32* %
// CHECK: call i32 @__kmpc_omp_task(%
// UNTIEDRT: br label %[[EXIT:[^,]+]]

// UNTIEDRT: call void [[CONSTR:@.+]](%struct.S* [[S1_ADDR]])
// UNTIEDRT: [[S2_VOID_PTR:%.+]] = call i8* @__kmpc_alloc(i32 %{{.+}}, i64 4, i8* inttoptr (i64 7 to i8*))
// UNTIEDRT: [[S2_PTR:%.+]] = bitcast i8* [[S2_VOID_PTR]] to %struct.S*
// UNTIEDRT: store %struct.S* [[S2_PTR]], %struct.S** [[S2_ADDR_PTR]],
// UNTIEDRT: load i32*, i32** %
// UNTIEDRT: store i32 2, i32* %
// UNTIEDRT: call i32 @__kmpc_omp_task(%
// UNTIEDRT: br label %[[EXIT]]

// UNTIEDRT: call void [[CONSTR]](%struct.S* [[S2_ADDR]])
// CHECK: call i8* @__kmpc_omp_task_alloc(
// CHECK: call i32 @__kmpc_omp_task(%
// CHECK: load i32*, i32** %
// CHECK: store i32 {{2|3}}, i32* %
// CHECK: call i32 @__kmpc_omp_task(%
// UNTIEDRT: br label %[[EXIT]]

// CHECK: call i32 @__kmpc_omp_taskyield(%
// CHECK: load i32*, i32** %
// CHECK: store i32 {{3|4}}, i32* %
// CHECK: call i32 @__kmpc_omp_task(%
// UNTIEDRT: br label %[[EXIT]]

// s1 = S();
// UNTIEDRT: call void [[CONSTR]](%struct.S* [[TMP:%.+]])
// UNTIEDRT: [[DST:%.+]] = bitcast %struct.S* [[S1_ADDR]] to i8*
// UNTIEDRT: [[SRC:%.+]] = bitcast %struct.S* [[TMP]] to i8*
// UNTIEDRT: call void @llvm.memcpy.{{.+}}(i8* {{.*}}[[DST]], i8* {{.*}}[[SRC]], i64 4, i1 false)
// UNTIEDRT: call void [[DESTR:@.+]](%struct.S* [[TMP]])

// CHECK: call i32 @__kmpc_omp_taskwait(%
// CHECK: load i32*, i32** %
// CHECK: store i32 {{4|5}}, i32* %
// CHECK: call i32 @__kmpc_omp_task(%
// UNTIEDRT: br label %[[EXIT]]

// UNTIEDRT: call void [[DESTR]](%struct.S* [[S2_ADDR]])
// UNTIEDRT: [[S2_VOID_PTR:%.+]] = bitcast %struct.S* [[S2_ADDR]] to i8*
// UNTIEDRT: call void @__kmpc_free(i32 %{{.+}}, i8* [[S2_VOID_PTR]], i8* inttoptr (i64 7 to i8*))
// UNTIEDRT: call void [[DESTR]](%struct.S* [[S1_ADDR]])
// CHECK: br label %[[CLEANUP]]

// CHECK: [[CLEANUP]]:
// UNTIEDRT: br label %[[EXIT]]

// UNTIEDRT: [[EXIT]]:
// UNTIEDRT-NEXT: ret i32 0

struct S1 {
  int a;
  S1() { taskinit(); }
  void taskinit() {
#pragma omp task
    a = 0;
  }
} s1;

// CHECK-LABEL: taskinit
// CHECK: call i8* @__kmpc_omp_task_alloc(

// CHECK: define internal i32
#endif