// RUN: %clang_cc1 -verify -fopenmp=libiomp5 -x c++ -emit-llvm %s -fexceptions -fcxx-exceptions -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp=libiomp5 -x c++ -std=c++11 -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp=libiomp5 -x c++ -triple x86_64-unknown-unknown -fexceptions -fcxx-exceptions -g -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp=libiomp5 -fexceptions -fcxx-exceptions -gline-tables-only -x c++ -emit-llvm %s -o - | FileCheck %s --check-prefix=TERM_DEBUG
//
// This test checks the LLVM IR emitted for '#pragma omp simd': loop metadata
// (!llvm.mem.parallel_loop_access), linear and lastprivate clauses,
// collapse(N), loops over random-access iterators, and (TERM_DEBUG prefix)
// debug locations of the terminate handler inside a parallel region.
//
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

long long get_val() { return 0; }
double *g_ptr;

// CHECK-LABEL: define {{.*void}} @{{.*}}simple{{.*}}(float* {{.+}}, float* {{.+}}, float* {{.+}}, float* {{.+}})
void simple(float *a, float *b, float *c, float *d) {
#pragma omp simd
// CHECK: store i32 0, i32* [[OMP_IV:%[^,]+]]

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID:[0-9]+]]
// CHECK-NEXT: [[CMP:%.+]] = icmp slt i32 [[IV]], 6
// CHECK-NEXT: br i1 [[CMP]], label %[[SIMPLE_LOOP1_BODY:.+]], label %[[SIMPLE_LOOP1_END:[^,]+]]
  for (int i = 3; i < 32; i += 5) {
// CHECK: [[SIMPLE_LOOP1_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV1_1:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK: [[CALC_I_1:%.+]] = mul nsw i32 [[IV1_1]], 5
// CHECK-NEXT: [[CALC_I_2:%.+]] = add nsw i32 3, [[CALC_I_1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
    a[i] = b[i] * c[i] * d[i];
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// CHECK-NEXT: [[ADD1_2:%.+]] = add nsw i32 [[IV1_2]], 1
// CHECK-NEXT: store i32 [[ADD1_2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP1_ID]]
// br label %{{.+}}, !llvm.loop !{{.+}}
  }
// CHECK: [[SIMPLE_LOOP1_END]]

  long long k = get_val();

#pragma omp simd linear(k : 3)
// CHECK: [[K0:%.+]] = call {{.*}}i64 @{{.*}}get_val
// CHECK-NEXT: store i64 [[K0]], i64* [[K_VAR:%[^,]+]]
// CHECK: [[K0LOAD:%.+]] = load i64, i64* [[K_VAR]]
// CHECK-NEXT: store i64 [[K0LOAD]], i64* [[LIN0:%[^,]+]]
// CHECK: store i32 0, i32* [[OMP_IV2:%[^,]+]]

// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID:[0-9]+]]
// CHECK-NEXT: [[CMP2:%.+]] = icmp slt i32 [[IV2]], 9
// CHECK-NEXT: br i1 [[CMP2]], label %[[SIMPLE_LOOP2_BODY:.+]], label %[[SIMPLE_LOOP2_END:[^,]+]]
  for (int i = 10; i > 1; i--) {
// CHECK: [[SIMPLE_LOOP2_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV2_0:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// FIXME: It is interesting, why the following "mul 1" was not constant folded?
// CHECK-NEXT: [[IV2_1:%.+]] = mul nsw i32 [[IV2_0]], 1
// CHECK-NEXT: [[LC_I_1:%.+]] = sub nsw i32 10, [[IV2_1]]
// CHECK-NEXT: store i32 [[LC_I_1]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
//
// CHECK-NEXT: [[LIN0_1:%.+]] = load i64, i64* [[LIN0]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[LIN_MUL1:%.+]] = mul nsw i32 [[IV2_2]], 3
// CHECK-NEXT: [[LIN_EXT1:%.+]] = sext i32 [[LIN_MUL1]] to i64
// CHECK-NEXT: [[LIN_ADD1:%.+]] = add nsw i64 [[LIN0_1]], [[LIN_EXT1]]
// Update of the privatized version of linear variable!
// CHECK-NEXT: store i64 [[LIN_ADD1]], i64* [[K_PRIVATIZED:%[^,]+]]
    a[k]++;
    k = k + 3;
// CHECK: [[IV2_2:%.+]] = load i32, i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// CHECK-NEXT: [[ADD2_2:%.+]] = add nsw i32 [[IV2_2]], 1
// CHECK-NEXT: store i32 [[ADD2_2]], i32* [[OMP_IV2]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP2_ID]]
// br label {{.+}}, !llvm.loop ![[SIMPLE_LOOP2_ID]]
  }
// CHECK: [[SIMPLE_LOOP2_END]]
//
// Update linear vars after loop, as the loop was operating on a private version.
// CHECK: [[LIN0_2:%.+]] = load i64, i64* [[LIN0]]
// CHECK-NEXT: [[LIN_ADD2:%.+]] = add nsw i64 [[LIN0_2]], 27
// CHECK-NEXT: store i64 [[LIN_ADD2]], i64* [[K_VAR]]
//

  int lin = 12;
#pragma omp simd linear(lin : get_val()), linear(g_ptr)

// Init linear private var.
// CHECK: store i32 12, i32* [[LIN_VAR:%[^,]+]]
// CHECK: [[LIN_LOAD:%.+]] = load i32, i32* [[LIN_VAR]]
// CHECK-NEXT: store i32 [[LIN_LOAD]], i32* [[LIN_START:%[^,]+]]
// CHECK: [[GLIN_LOAD:%.+]] = load double*, double** [[GLIN_VAR:@[^,]+]]
// CHECK-NEXT: store double* [[GLIN_LOAD]], double** [[GLIN_START:%[^,]+]]

// CHECK: store i64 0, i64* [[OMP_IV3:%[^,]+]]

// Remember linear step.
// CHECK: [[CALL_VAL:%.+]] = invoke
// CHECK: store i64 [[CALL_VAL]], i64* [[LIN_STEP:%[^,]+]]

// CHECK: [[IV3:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID:[0-9]+]]
// CHECK-NEXT: [[CMP3:%.+]] = icmp ult i64 [[IV3]], 4
// CHECK-NEXT: br i1 [[CMP3]], label %[[SIMPLE_LOOP3_BODY:.+]], label %[[SIMPLE_LOOP3_END:[^,]+]]
  for (unsigned long long it = 2000; it >= 600; it-=400) {
// CHECK: [[SIMPLE_LOOP3_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV3_0:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul i64 [[IV3_0]], 400
// CHECK-NEXT: [[LC_IT_2:%.+]] = sub i64 2000, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
//
// Linear start and step are used to calculate current value of the linear variable.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NOT: store i32 {{.+}}, i32* [[LIN_VAR]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[IV3_1:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[MUL:%.+]] = mul i64 [[IV3_1]], 1
// CHECK: [[GEP:%.+]] = getelementptr{{.*}}[[GLINSTART]]
// CHECK-NEXT: store double* [[GEP]], double** [[G_PTR_CUR:%[^,]+]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
    *g_ptr++ = 0.0;
// CHECK: [[GEP_VAL:%.+]] = load double{{.*}}[[G_PTR_CUR]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: store double{{.*}}[[GEP_VAL]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
    a[it + lin]++;
// CHECK: [[FLT_INC:%.+]] = fadd float
// CHECK-NEXT: store float [[FLT_INC]],{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK: [[IV3_2:%.+]] = load i64, i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
// CHECK-NEXT: [[ADD3_2:%.+]] = add i64 [[IV3_2]], 1
// CHECK-NEXT: store i64 [[ADD3_2]], i64* [[OMP_IV3]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP3_ID]]
  }
// CHECK: [[SIMPLE_LOOP3_END]]
//
// Linear start and step are used to calculate final value of the linear variables.
// CHECK: [[LINSTART:.+]] = load i32, i32* [[LIN_START]]
// CHECK: [[LINSTEP:.+]] = load i64, i64* [[LIN_STEP]]
// CHECK: store i32 {{.+}}, i32* [[LIN_VAR]],
// CHECK: [[GLINSTART:.+]] = load double*, double** [[GLIN_START]]
// CHECK: store double* {{.*}}[[GLIN_VAR]]

#pragma omp simd
// CHECK: store i32 0, i32* [[OMP_IV4:%[^,]+]]

// CHECK: [[IV4:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID:[0-9]+]]
// CHECK-NEXT: [[CMP4:%.+]] = icmp slt i32 [[IV4]], 4
// CHECK-NEXT: br i1 [[CMP4]], label %[[SIMPLE_LOOP4_BODY:.+]], label %[[SIMPLE_LOOP4_END:[^,]+]]
  for (short it = 6; it <= 20; it-=-4) {
// CHECK: [[SIMPLE_LOOP4_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV4_0:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV4_0]], 4
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 6, [[LC_IT_1]]
// CHECK-NEXT: [[LC_IT_3:%.+]] = trunc i32 [[LC_IT_2]] to i16
// CHECK-NEXT: store i16 [[LC_IT_3]], i16* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]

// CHECK: [[IV4_2:%.+]] = load i32, i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
// CHECK-NEXT: [[ADD4_2:%.+]] = add nsw i32 [[IV4_2]], 1
// CHECK-NEXT: store i32 [[ADD4_2]], i32* [[OMP_IV4]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP4_ID]]
  }
// CHECK: [[SIMPLE_LOOP4_END]]

#pragma omp simd
// CHECK: store i32 0, i32* [[OMP_IV5:%[^,]+]]

// CHECK: [[IV5:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID:[0-9]+]]
// CHECK-NEXT: [[CMP5:%.+]] = icmp slt i32 [[IV5]], 26
// CHECK-NEXT: br i1 [[CMP5]], label %[[SIMPLE_LOOP5_BODY:.+]], label %[[SIMPLE_LOOP5_END:[^,]+]]
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
// CHECK: [[SIMPLE_LOOP5_BODY]]
// Start of body: calculate it from IV:
// CHECK: [[IV5_0:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[IV5_1:%.+]] = mul nsw i32 [[IV5_0]], 1
// CHECK-NEXT: [[LC_IT_1:%.+]] = sub nsw i32 122, [[IV5_1]]
// CHECK-NEXT: [[LC_IT_2:%.+]] = trunc i32 [[LC_IT_1]] to i8
// CHECK-NEXT: store i8 [[LC_IT_2]], i8* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]

// CHECK: [[IV5_2:%.+]] = load i32, i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
// CHECK-NEXT: [[ADD5_2:%.+]] = add nsw i32 [[IV5_2]], 1
// CHECK-NEXT: store i32 [[ADD5_2]], i32* [[OMP_IV5]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP5_ID]]
  }
// CHECK: [[SIMPLE_LOOP5_END]]

#pragma omp simd
// FIXME: I think we would get wrong result using 'unsigned' in the loop below.
// So we'll need to add zero trip test for 'unsigned' counters.
//
// CHECK: store i32 0, i32* [[OMP_IV6:%[^,]+]]

// CHECK: [[IV6:%.+]] = load i32, i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID:[0-9]+]]
// CHECK-NEXT: [[CMP6:%.+]] = icmp slt i32 [[IV6]], -8
// CHECK-NEXT: br i1 [[CMP6]], label %[[SIMPLE_LOOP6_BODY:.+]], label %[[SIMPLE_LOOP6_END:[^,]+]]
  for (int i=100; i<10; i+=10) {
// CHECK: [[SIMPLE_LOOP6_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV6_0:%.+]] = load i32, i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i32 [[IV6_0]], 10
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i32 100, [[LC_IT_1]]
// CHECK-NEXT: store i32 [[LC_IT_2]], i32* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]]

// CHECK: [[IV6_2:%.+]] = load i32, i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]]
// CHECK-NEXT: [[ADD6_2:%.+]] = add nsw i32 [[IV6_2]], 1
// CHECK-NEXT: store i32 [[ADD6_2]], i32* [[OMP_IV6]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP6_ID]]
  }
// CHECK: [[SIMPLE_LOOP6_END]]

  int A;
#pragma omp simd lastprivate(A)
// Clause 'lastprivate' implementation is not completed yet.
// Test checks that one iteration is separated in presence of lastprivate.
//
// CHECK: store i64 0, i64* [[OMP_IV7:%[^,]+]]
// CHECK: br i1 true, label %[[SIMPLE_IF7_THEN:.+]], label %[[SIMPLE_IF7_END:[^,]+]]
// CHECK: [[SIMPLE_IF7_THEN]]
// CHECK: br label %[[SIMD_LOOP7_COND:[^,]+]]
// CHECK: [[SIMD_LOOP7_COND]]
// CHECK-NEXT: [[IV7:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID:[0-9]+]]
// CHECK-NEXT: [[CMP7:%.+]] = icmp slt i64 [[IV7]], 6
// CHECK-NEXT: br i1 [[CMP7]], label %[[SIMPLE_LOOP7_BODY:.+]], label %[[SIMPLE_LOOP7_END:[^,]+]]
  for (long long i = -10; i < 10; i += 3) {
// CHECK: [[SIMPLE_LOOP7_BODY]]
// Start of body: calculate i from IV:
// CHECK: [[IV7_0:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[LC_IT_1:%.+]] = mul nsw i64 [[IV7_0]], 3
// CHECK-NEXT: [[LC_IT_2:%.+]] = add nsw i64 -10, [[LC_IT_1]]
// CHECK-NEXT: store i64 [[LC_IT_2]], i64* {{.+}}, !llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
    A = i;
// CHECK: [[IV7_2:%.+]] = load i64, i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
// CHECK-NEXT: [[ADD7_2:%.+]] = add nsw i64 [[IV7_2]], 1
// CHECK-NEXT: store i64 [[ADD7_2]], i64* [[OMP_IV7]]{{.*}}!llvm.mem.parallel_loop_access ![[SIMPLE_LOOP7_ID]]
  }
// CHECK: [[SIMPLE_LOOP7_END]]
// Separated last iteration.
// CHECK: [[IV7_4:%.+]] = load i64, i64* [[OMP_IV7]]
// CHECK-NEXT: [[LC_FIN_1:%.+]] = mul nsw i64 [[IV7_4]], 3
// CHECK-NEXT: [[LC_FIN_2:%.+]] = add nsw i64 -10, [[LC_FIN_1]]
// CHECK-NEXT: store i64 [[LC_FIN_2]], i64* [[ADDR_I:%[^,]+]]
// CHECK: [[LOAD_I:%.+]] = load i64, i64* [[ADDR_I]]
// CHECK-NEXT: [[CONV_I:%.+]] = trunc i64 [[LOAD_I]] to i32
//
// CHECK: br label %[[SIMPLE_IF7_END]]
// CHECK: [[SIMPLE_IF7_END]]
//

// CHECK: ret void
}

template <class T, unsigned K> T tfoo(T a) { return a + K; }

template <typename T, unsigned N>
int templ1(T a, T *z) {
  #pragma omp simd collapse(N)
  for (int i = 0; i < N * 2; i++) {
    for (long long j = 0; j < (N + N + N + N); j += 2) {
      z[i + j] = a + tfoo<T, N>(i + j);
    }
  }
  return 0;
}

// Instantiation templ1<float,2>
// CHECK-LABEL: define {{.*i32}} @{{.*}}templ1{{.*}}(float {{.+}}, float* {{.+}})
// CHECK: store i64 0, i64* [[T1_OMP_IV:[^,]+]]
// ...
// CHECK: [[IV:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID:[0-9]+]]
// CHECK-NEXT: [[CMP1:%.+]] = icmp slt i64 [[IV]], 16
// CHECK-NEXT: br i1 [[CMP1]], label %[[T1_BODY:.+]], label %[[T1_END:[^,]+]]
// CHECK: [[T1_BODY]]
// Loop counters i and j updates:
// CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
// CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
// CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
// CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
// CHECK-NEXT: store i32 [[I_2]], i32* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK: [[IV2:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[J_1:%.+]] = srem i64 [[IV2]], 4
// CHECK-NEXT: [[J_2:%.+]] = mul nsw i64 [[J_1]], 2
// CHECK-NEXT: [[J_2_ADD0:%.+]] = add nsw i64 0, [[J_2]]
// CHECK-NEXT: store i64 [[J_2_ADD0]], i64* {{%.+}}{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// simd.for.inc:
// CHECK: [[IV3:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: [[INC:%.+]] = add nsw i64 [[IV3]], 1
// CHECK-NEXT: store i64 [[INC]], i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
// CHECK-NEXT: br label {{%.+}}
// CHECK: [[T1_END]]
// CHECK: ret i32 0
//
void inst_templ1() {
  float a;
  float z[100];
  templ1<float,2> (a, z);
}


typedef int MyIdx;

class IterDouble {
  double *Ptr;
public:
  IterDouble operator++ () const {
    IterDouble n;
    n.Ptr = Ptr + 1;
    return n;
  }
  bool operator < (const IterDouble &that) const {
    return Ptr < that.Ptr;
  }
  double & operator *() const {
    return *Ptr;
  }
  MyIdx operator - (const IterDouble &that) const {
    return (MyIdx) (Ptr - that.Ptr);
  }
  IterDouble operator + (int Delta) {
    IterDouble re;
    re.Ptr = Ptr + Delta;
    return re;
  }

  ///~IterDouble() {}
};

// CHECK-LABEL: define {{.*void}} @{{.*}}iter_simple{{.*}}
void iter_simple(IterDouble ia, IterDouble ib, IterDouble ic) {
//
// CHECK: store i32 0, i32* [[IT_OMP_IV:%[^,]+]]
// Calculate number of iterations before the loop body.
// CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
// CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
// CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
// CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
// CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
#pragma omp simd

// CHECK: [[IV:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}} !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[LAST_IT:%.+]] = load i32, i32* [[OMP_LAST_IT]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// CHECK-NEXT: [[NUM_IT:%.+]] = add nsw i32 [[LAST_IT]], 1
// CHECK-NEXT: [[CMP:%.+]] = icmp slt i32 [[IV]], [[NUM_IT]]
// CHECK-NEXT: br i1 [[CMP]], label %[[IT_BODY:[^,]+]], label %[[IT_END:[^,]+]]
  for (IterDouble i = ia; i < ib; ++i) {
// CHECK: [[IT_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// Call of operator+ (i, IV).
// CHECK: {{%.+}} = invoke {{.+}} @{{.*}}IterDouble{{.*}}
// ... loop body ...
    *i = *ic * 0.5;
// Float multiply and save result.
// CHECK: [[MULR:%.+]] = fmul double {{%.+}}, 5.000000e-01
// CHECK-NEXT: invoke {{.+}} @{{.*}}IterDouble{{.*}}
// CHECK: store double [[MULR:%.+]], double* [[RESULT_ADDR:%.+]], !llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
    ++ic;
//
// CHECK: [[IV2:%.+]] = load i32, i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[IT_OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[ITER_LOOP_ID]]
// br label %{{.*}}, !llvm.loop ![[ITER_LOOP_ID]]
  }
// CHECK: [[IT_END]]
// CHECK: ret void
}


// CHECK-LABEL: define {{.*void}} @{{.*}}collapsed{{.*}}
void collapsed(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  unsigned j; // middle loop counter, leads to unsigned icmp in loop header.
  // k declared in the loop init below
  short l; // inner loop counter
// CHECK: store i32 0, i32* [[OMP_IV:[^,]+]]
//
#pragma omp simd collapse(4)

// CHECK: [[IV:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[CMP:%.+]] = icmp ult i32 [[IV]], 120
// CHECK-NEXT: br i1 [[CMP]], label %[[COLL1_BODY:[^,]+]], label %[[COLL1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 2u; j < 5u; j++) //3 iterations
      for (int k = 3; k <= 6; k++) // 4 iterations
        for (l = 4; l < 9; ++l) // 5 iterations
        {
// CHECK: [[COLL1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// Calculation of the loop counters values.
// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
// CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
// CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
// CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20
// CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3
// CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
// CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]
// CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5
// CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4
// CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1
// CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]]
// CHECK-NEXT: store i32 [[CALC_K_3]], i32* [[LC_K:.+]]
// CHECK: [[IV1_4:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[CALC_L_1:%.+]] = urem i32 [[IV1_4]], 5
// CHECK-NEXT: [[CALC_L_1_MUL1:%.+]] = mul i32 [[CALC_L_1]], 1
// CHECK-NEXT: [[CALC_L_2:%.+]] = add i32 4, [[CALC_L_1_MUL1]]
// CHECK-NEXT: [[CALC_L_3:%.+]] = trunc i32 [[CALC_L_2]] to i16
// CHECK-NEXT: store i16 [[CALC_L_3]], i16* [[LC_L:.+]]
// ... loop body ...
// End of body: store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
    float res = b[j] * c[k];
    a[i] = res * d[l];
// CHECK: [[IV2:%.+]] = load i32, i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add i32 [[IV2]], 1
// CHECK-NEXT: store i32 [[ADD2]], i32* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
// br label %{{[^,]+}}, !llvm.loop ![[COLL1_LOOP_ID]]
// CHECK: [[COLL1_END]]
  }
// i,j,l are updated; k is not updated.
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK-NEXT: store i32 5, i32* [[I:%[^,]+]]
// CHECK-NEXT: store i16 9, i16* [[I:%[^,]+]]
// CHECK: ret void
}

extern char foo();
extern double globalfloat;

// CHECK-LABEL: define {{.*void}} @{{.*}}widened{{.*}}
void widened(float *a, float *b, float *c, float *d) {
  int i; // outer loop counter
  short j; // inner loop counter
  globalfloat = 1.0;
  int localint = 1;
// CHECK: store double {{.+}}, double* [[GLOBALFLOAT:@.+]]
// Counter is widened to 64 bits.
// CHECK: store i64 0, i64* [[OMP_IV:[^,]+]]
//
#pragma omp simd collapse(2) private(globalfloat, localint)

// CHECK: [[IV:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID:[0-9]+]]
// CHECK-NEXT: [[LI:%.+]] = load i64, i64* [[OMP_LI:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK-NEXT: [[NUMIT:%.+]] = add nsw i64 [[LI]], 1
// CHECK-NEXT: [[CMP:%.+]] = icmp slt i64 [[IV]], [[NUMIT]]
// CHECK-NEXT: br i1 [[CMP]], label %[[WIDE1_BODY:[^,]+]], label %[[WIDE1_END:[^,]+]]
  for (i = 1; i < 3; i++) // 2 iterations
    for (j = 0; j < foo(); j++) // foo() iterations
  {
// CHECK: [[WIDE1_BODY]]
// Start of body: calculate i from index:
// CHECK: [[IV1:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// Calculation of the loop counters values...
// CHECK: store i32 {{[^,]+}}, i32* [[LC_I:.+]]
// CHECK: [[IV1_2:%.+]] = load i64, i64* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK: store i16 {{[^,]+}}, i16* [[LC_J:.+]]
// ... loop body ...
//
// Here we expect store into private double var, not global
// CHECK-NOT: store double {{.+}}, double* [[GLOBALFLOAT]]
    globalfloat = (float)j/i;
    float res = b[j] * c[j];
// Store into a[i]:
// CHECK: store float [[RESULT:%.+]], float* [[RESULT_ADDR:%.+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
    a[i] = res * d[i];
// Then there's a store into private var localint:
// CHECK: store i32 {{.+}}, i32* [[LOCALINT:%[^,]+]]{{.+}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
    localint = (int)j;
// CHECK: [[IV2:%.+]] = load i64, i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
// CHECK-NEXT: [[ADD2:%.+]] = add nsw i64 [[IV2]], 1
// CHECK-NEXT: store i64 [[ADD2]], i64* [[OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[WIDE1_LOOP_ID]]
//
// br label %{{[^,]+}}, !llvm.loop ![[WIDE1_LOOP_ID]]
// CHECK: [[WIDE1_END]]
  }
// i,j are updated.
// CHECK: store i32 3, i32* [[I:%[^,]+]]
// CHECK: store i16
//
// Here we expect store into original localint, not its privatized version.
// CHECK-NOT: store i32 {{.+}}, i32* [[LOCALINT]]
  localint = (int)j;
// CHECK: ret void
}

// TERM_DEBUG-LABEL: bar
int bar() {return 0;};

// TERM_DEBUG-LABEL: parallel_simd
void parallel_simd(float *a) {
#pragma omp parallel
#pragma omp simd
// TERM_DEBUG-NOT: __kmpc_global_thread_num
// TERM_DEBUG: invoke i32 {{.*}}bar{{.*}}()
// TERM_DEBUG: unwind label %[[TERM_LPAD:.+]],
// TERM_DEBUG-NOT: __kmpc_global_thread_num
// TERM_DEBUG: [[TERM_LPAD]]
// TERM_DEBUG: call void @__clang_call_terminate
// TERM_DEBUG: unreachable
  for (unsigned i = 131071; i <= 2147483647; i += 127)
    a[i] += bar();
}
// TERM_DEBUG: !{{[0-9]+}} = !MDLocation(line: [[@LINE-11]],
#endif // HEADER