// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -g -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fexceptions -fcxx-exceptions -gline-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG
// REQUIRES: x86-registered-target
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

long long get_val() { return 0; }
double *g_ptr;

// CHECK-LABEL: define {{.*void}} @{{.*}}simple{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void simple(float *a, float *b, float *c, float *d) {
  #pragma omp for simd
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 5
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 5, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]]
  for (int i = 3; i < 32; i += 5) {
// CHECK: [[SIMPLE_LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// br label %{{.+}}, !llvm.loop !{{.+}}
  }
// CHECK: [[SIMPLE_LOOP1_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  long long k = get_val();

  #pragma omp for simd linear(k : 3) schedule(dynamic)
// CHECK: [[K0:%.+]] = call {{.*}}i64 @{{.*}}get_val
// CHECK-NEXT: store i64 [[K0]], i64* [[K_VAR:%[^,]+]]
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_VAR]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]

// CHECK: call void @__kmpc_dispatch_init_4(%ident_t* {{.+}}, i32 %{{.+}}, i32 35, i32 0, i32 8, i32 1, i32 1)
// CHECK: [[NEXT:%.+]] = call i32 @__kmpc_dispatch_next_4(%ident_t* {{.+}}, i32 %{{.+}}, i32* %{{.+}}, i32* [[LB:%.+]], i32* [[UB:%.+]], i32* %{{.+}})
// CHECK: [[COND:%.+]] = icmp ne i32 [[NEXT]], 0
// CHECK: br i1 [[COND]], label %[[CONT:.+]], label %[[END:.+]]
// CHECK: [[CONT]]
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV2:%[^,]+]],

// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[CMP2:%.+]] = icmp sle i32 [[IV2]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP2_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// FIXME: Why was the following "mul 1" not constant folded?
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update of the privatized version of the linear variable!
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]]
  }
// CHECK: [[SIMPLE_LOOP2_END]]
//
// Update linear vars after the loop, as the loop was operating on a private version.
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[K_VAR]]
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  int lin = 12;
  #pragma omp for simd linear(lin : get_val()), linear(g_ptr)

// Init linear private var.
// CHECK: store i32 12, i32* [[LIN_VAR:%[^,]+]]
// CHECK: [[LIN_LOAD:%.+]] = load i32, i32* [[LIN_VAR]]
// CHECK-NEXT: store i32 [[LIN_LOAD]], i32* [[LIN_START:%[^,]+]]
// Remember linear step.
// CHECK: [[CALL_VAL:%.+]] = invoke
// CHECK: store i64 [[CALL_VAL]], i64* [[LIN_STEP:%[^,]+]]

// CHECK: [[GLIN_LOAD:%.+]] = load double*, double** [[GLIN_VAR:@[^,]+]]
// CHECK-NEXT: store double* [[GLIN_LOAD]], double** [[GLIN_START:%[^,]+]]

// CHECK: call void @__kmpc_for_static_init_8u(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i64 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV3:%[^,]+]],

// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[CMP3:%.+]] = icmp ule i64 [[IV3]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]]
  for (unsigned long long it = 2000; it >= 600; it-=400) {
// CHECK: [[SIMPLE_LOOP3_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400
// CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
//
// Linear start and step are used to calculate the current value of the linear variable.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[MUL:%.+]] = mul i64 [[IV3_1]], 1
// CHECK: [[GEP:%.+]] = getelementptr{{.*}}[[GLINSTART]]
// CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
    *g_ptr++ = 0.0;
// CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: store double{{.*}}[[GEP_VAL]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
    a[it + lin]++;
// CHECK: [[FLT_INC:%.+]] = fadd float
// CHECK-NEXT: store float [[FLT_INC]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1
// CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
  }
// CHECK: [[SIMPLE_LOOP3_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
//
// Linear start and step are used to calculate the final value of the linear variables.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]
// CHECK: store i32 {{.+}}, i32* [[LIN_VAR]],
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]
// CHECK: store double* {{.*}}[[GLIN_VAR]]
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  #pragma omp for simd
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 3
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 3, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV4:%[^,]+]],

// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[CMP4:%.+]] = icmp sle i32 [[IV4]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]]
  for (short it = 6; it <= 20; it-=-4) {
// CHECK: [[SIMPLE_LOOP4_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]]
// CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16
// CHECK-NEXT: store i16 [[LC_IT_3]], i16* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]

// CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1
// CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
  }
// CHECK: [[SIMPLE_LOOP4_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

  #pragma omp for simd
// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], 25
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 25, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV5:%[^,]+]],

// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[CMP5:%.+]] = icmp sle i32 [[IV5]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]]
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
// CHECK: [[SIMPLE_LOOP5_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1
// CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]]
// CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8
// CHECK-NEXT: store i8 [[LC_IT_2]], i8* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]

// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1
// CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
  }
// CHECK: [[SIMPLE_LOOP5_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})

// CHECK-NOT: mul i32 %{{.+}}, 10
  #pragma omp for simd
  for (unsigned i=100; i<10; i+=10) {
  }

  int A;
  #pragma omp parallel
  {
// CHECK: store i32 -1, i32* [[A:%.+]],
    A = -1;
    #pragma omp for simd lastprivate(A)
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV7:%[^,]+]],

// CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]]
// CHECK: [[SIMD_LOOP7_COND]]
// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID:[0-9]+]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[CMP7:%.+]] = icmp sle i64 [[IV7]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]]
    for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP7_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[CONV:%.+]] = trunc i64 [[LC_VAL]] to i32
// CHECK-NEXT: store i32 [[CONV]], i32* [[A_PRIV:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
      A = i;
// CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1
// CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
    }
// CHECK: [[SIMPLE_LOOP7_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: load i32, i32*
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: br i1 %{{.+}}, label
// CHECK: [[A_PRIV_VAL:%.+]] = load i32, i32* [[A_PRIV]],
// CHECK-NEXT: store i32 [[A_PRIV_VAL]], i32* %{{.+}},
// CHECK-NEXT: br label
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
  }
  int R;
  #pragma omp parallel
  {
// CHECK: store i32 -1, i32* [[R:%[^,]+]],
    R = -1;
// CHECK: store i32 1, i32* [[R_PRIV:%[^,]+]],
    #pragma omp for simd reduction(*:R)
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 6
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 6, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV8:%[^,]+]],

// CHECK: br label %[[SIMD_LOOP8_COND:[^,]+]]
// CHECK: [[SIMD_LOOP8_COND]]
// CHECK-NEXT: [[IV8:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID:[0-9]+]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[CMP8:%.+]] = icmp sle i64 [[IV8]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP8]], label %[[SIMPLE_LOOP8_BODY:.+]], label %[[SIMPLE_LOOP8_END:[^,]+]]
    for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP8_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV8_0:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV8_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* [[LC:%[^,]+]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[LC_VAL:%.+]] = load i64, i64* [[LC]]{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK: store i32 %{{.+}}, i32* [[R_PRIV]],{{.+}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
      R *= i;
// CHECK: [[IV8_2:%.+]] = load i64, i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
// CHECK-NEXT: [[ADD8_2:%.+]] = add nsw i64 [[IV8_2]], 1
// CHECK-NEXT: store i64 [[ADD8_2]], i64* [[OMP_IV8]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP8_ID]]
    }
// CHECK: [[SIMPLE_LOOP8_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call i32 @__kmpc_reduce(
// CHECK: [[R_PRIV_VAL:%.+]] = load i32, i32* [[R_PRIV]],
// CHECK: [[RED:%.+]] = mul nsw i32 %{{.+}}, [[R_PRIV_VAL]]
// CHECK-NEXT: store i32 [[RED]], i32* %{{.+}},
// CHECK-NEXT: call void @__kmpc_end_reduce(
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
  }
}

template <class T, unsigned K> T tfoo(T a) { return a + K; }

template <typename T, unsigned N>
int templ1(T a, T *z) {
  #pragma omp for simd collapse(N)
  for (int i = 0; i < N * 2; i++) {
    for (long long j = 0; j < (N + N + N + N); j += 2) {
      z[i + j] = a + tfoo<T, N>(i + j);
    }
  }
  return 0;
}

// Instantiation templ1<float,2>
// CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float {{.+}}, float* {{.+}})
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], 15
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ 15, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[T1_OMP_IV:%[^,]+]],

// ...
// CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID:[0-9]+]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[CMP1:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]]
// CHECK: [[T1_BODY]]
// Loop counters i and j updates:
// CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
// CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
// CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
// CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
// CHECK-NEXT: store i32 [[I_2]], i32* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[J_1:%.+]] = srem i64 [[IV2]], 4
// CHECK-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2
// CHECK-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]]
// CHECK-NEXT: store i64 [[J_2_ADD0]], i64* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// simd.for.inc:
// CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1
// CHECK-NEXT: store i64 [[INC]], i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: br label {{%.+}}
// CHECK: [[T1_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret i32 0
//
void inst_templ1() {
  float a;
  float z[100];
  templ1<float,2> (a, z);
}


typedef int MyIdx;

class IterDouble {
  double *Ptr;
public:
  IterDouble operator++ () const {
    IterDouble n;
    n.Ptr = Ptr + 1;
    return n;
  }
  bool operator < (const IterDouble &that) const {
    return Ptr < that.Ptr;
  }
  double & operator *() const {
    return *Ptr;
  }
  MyIdx operator - (const IterDouble &that) const {
    return (MyIdx) (Ptr - that.Ptr);
  }
  IterDouble operator + (int Delta) {
    IterDouble re;
    re.Ptr = Ptr + Delta;
    return re;
  }

  ///~IterDouble() {}
};

// CHECK-LABEL: define {{.*void}} @{{.*}}iter_simple{{.*}}
void iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) {
//
// Calculate number of iterations before the loop body.
// CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
// CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
// CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
// CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
// CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
  #pragma omp for simd

// CHECK: call void @__kmpc_for_static_init_4(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i32 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i32, i32* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[IT_OMP_IV:%[^,]+]],

// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}} !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]]
  for (IterDouble i = ia; i < ib; ++i) {
// CHECK: [[IT_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// Call of operator+ (i, IV).
// CHECK: {{%.+}} = invoke {{.+}} @{{.*}}IterDouble{{.*}}
// ... loop body ...
    *i = *ic * 0.5;
// Float multiply and save result.
// CHECK: [[MULR:%.+]] = fmul double {{%.+}}, 5.000000e-01
// CHECK-NEXT: invoke {{.+}} @{{.*}}IterDouble{{.*}}
// CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]], !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
    ++ic;
//
// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]]
  }
// CHECK: [[IT_END]]
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
}


// CHECK-LABEL: define {{.*void}} @{{.*}}collapsed{{.*}}
void collapsed(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  unsigned j; // middle loop counter, leads to unsigned icmp in loop header.
  // k declared in the loop init below
  short l; // inner loop counter
// CHECK: call void @__kmpc_for_static_init_4u(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i32* [[LB:%[^,]+]], i32* [[UB:%[^,]+]], i32* [[STRIDE:%[^,]+]], i32 1, i32 1)
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: [[CMP:%.+]] = icmp ugt i32 [[UB_VAL]], 119
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i32 [ 119, %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i32 [[UP]], i32* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i32, i32* [[LB]],
// CHECK: store i32 [[LB_VAL]], i32* [[OMP_IV:%[^,]+]],
//
  #pragma omp for simd collapse(4)

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i32, i32* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ule i32 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 2u; j < 5u; j++) // 3 iterations
      for (int k = 3; k <= 6; k++) // 4 iterations
        for (l = 4; l < 9; ++l) // 5 iterations
        {
// CHECK: [[COLL1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// Calculation of the loop counter values.
// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
// CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20
// CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3
// CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
// CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]
// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5
// CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4
// CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1
// CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_K_3]], i32* [[LC_K:.+]]
// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_L_1:%.+]] = urem i32 [[IV1_4]], 5
// CHECK-NEXT: [[CALC_L_1_MUL1:%.+]] = mul i32 [[CALC_L_1]], 1
// CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[CALC_L_1_MUL1]]
// CHECK-NEXT: [[CALC_L_3:%.+]] = trunc i32 [[CALC_L_2]] to i16
// CHECK-NEXT: store i16 [[CALC_L_3]], i16* [[LC_L:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
          float res = b[j] * c[k];
          a[i] = res * d[l];
// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]]
// CHECK: [[COLL1_END]]
        }
// i,j,l are updated; k is not updated.
// CHECK: call void @__kmpc_for_static_fini(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK-NEXT: store i32 3, i32* [[I:%[^,]+]]
// CHECK-NEXT: store i32 5, i32* [[I:%[^,]+]]
// CHECK-NEXT: store i16 9, i16* [[I:%[^,]+]]
// CHECK: call void @__kmpc_barrier(%ident_t* {{.+}}, i32 %{{.+}})
// CHECK: ret void
}

extern char foo();
extern double globalfloat;

// CHECK-LABEL: define {{.*void}} @{{.*}}widened{{.*}}
void widened(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  short j; // inner loop counter
  globalfloat = 1.0;
  int localint = 1;
// CHECK: store double {{.+}}, double* [[GLOBALFLOAT:@.+]]
// Counter is widened to 64 bits.
// CHECK: [[MUL:%.+]] = mul nsw i64 2, %{{.+}}
// CHECK-NEXT: [[SUB:%.+]] = sub nsw i64 [[MUL]], 1
// CHECK-NEXT: store i64 [[SUB]], i64* [[OMP_LAST_IT:%[^,]+]],
// CHECK: call void @__kmpc_for_static_init_8(%ident_t* {{[^,]+}}, i32 %{{[^,]+}}, i32 34, i32* %{{[^,]+}}, i64* [[LB:%[^,]+]], i64* [[UB:%[^,]+]], i64* [[STRIDE:%[^,]+]], i64 1, i64 1)
// CHECK-DAG: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK-DAG: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: [[CMP:%.+]] = icmp sgt i64 [[UB_VAL]], [[OMP_LAST_IT_VAL]]
// CHECK: br i1 [[CMP]], label %[[TRUE:.+]], label %[[FALSE:[^,]+]]
// CHECK: [[TRUE]]
// CHECK: [[OMP_LAST_IT_VAL:%.+]] = load i64, i64* [[OMP_LAST_IT]],
// CHECK: br label %[[SWITCH:[^,]+]]
// CHECK: [[FALSE]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]],
// CHECK: br label %[[SWITCH]]
// CHECK: [[SWITCH]]
// CHECK: [[UP:%.+]] = phi i64 [ [[OMP_LAST_IT_VAL]], %[[TRUE]] ], [ [[UB_VAL]], %[[FALSE]] ]
// CHECK: store i64 [[UP]], i64* [[UB]],
// CHECK: [[LB_VAL:%.+]] = load i64, i64* [[LB]],
// CHECK: store i64 [[LB_VAL]], i64* [[OMP_IV:%[^,]+]],
//
  #pragma omp for simd collapse(2) private(globalfloat, localint)

// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID:[0-9]+]]
// CHECK: [[UB_VAL:%.+]] = load i64, i64* [[UB]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK-NEXT: [[CMP:%.+]] = icmp sle i64 [[IV]], [[UB_VAL]]
// CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 0; j < foo(); j++) // foo() iterations
    {
// CHECK: [[WIDE1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// Calculation of the loop counter values...
// CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]]
// ... loop body ...
//
// Here we expect store into private double var, not global
// CHECK-NOT: store double {{.+}}, double* [[GLOBALFLOAT]]
      globalfloat = (float)j/i;
      float res = b[j] * c[j];
// Store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
      a[i] = res * d[i];
// Then there's a store into private var localint:
// CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
      localint = (int)j;
// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1
// CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
//
// br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]]
// CHECK: [[WIDE1_END]]
    }
// i,j are updated.
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i16
//
// Here we expect store into original localint, not its privatized version.
// CHECK-NOT: store i32 {{.+}}, i32* [[LOCALINT]]
  localint = (int)j;
// CHECK: ret void
}

// TERM_DEBUG-LABEL: bar
int bar() {return 0;};

// TERM_DEBUG-LABEL: parallel_simd
void parallel_simd(float *a) {
#pragma omp parallel
#pragma omp for simd
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG: invoke i32 {{.*}}bar{{.*}}()
  // TERM_DEBUG: unwind label %[[TERM_LPAD:.+]],
  // TERM_DEBUG-NOT: __kmpc_global_thread_num
  // TERM_DEBUG: [[TERM_LPAD]]
  // TERM_DEBUG: call void @__clang_call_terminate
  // TERM_DEBUG: unreachable
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += bar();
}
// TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]],
#endif // HEADER