// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test host codegen.
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK5
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK5
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK7
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK7
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK9
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK9
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK11
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK11
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK13
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK13
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK15
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK15

// Test target codegen - host bc file has to be created first.
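// A minimal sketch of the two-step flow the device RUN lines below follow
// (the file names here are illustrative, not part of the test): the host IR
// is emitted first, then handed to the device compile via
// -fopenmp-host-ir-file-path.
//   %clang_cc1 -fopenmp -fopenmp-targets=<device-triple> -emit-llvm-bc %s -o host.bc
//   %clang_cc1 -fopenmp -fopenmp-is-device -fopenmp-host-ir-file-path host.bc -emit-llvm %s -o -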
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK17
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK17
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK19
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK19
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK21
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK21
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK23
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK23
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK9
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK9
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK11
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK11
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK13
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK13
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK15
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -DOMP5 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK15

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// We have 8 target regions, but only 7 that actually will generate offloading
// code, only 6 will have mapped arguments, and only 4 have all-constant map
// sizes.

// Check that the target registration code is emitted as a Ctor.

template <typename tx, typename ty>
struct TT {
  tx X;
  ty Y;
};

long long get_val() { return 0; }

int foo(int n) {
  int a = 0;
  short aa = 0;
  float b[10];
  float bn[n];
  double c[5][10];
  double cn[5][n];
  TT<long long, char> d;

  #pragma omp target parallel for simd nowait
  for (int i = 3; i < 32; i += 5) {
  }

  long long k = get_val();
  #pragma omp target parallel for simd if(target: 0) linear(k : 3) schedule(dynamic)
  for (int i = 10; i > 1; i--) {
    a += 1;
  }

  int lin = 12;
  #pragma omp target parallel for simd if(target: 1) linear(lin, a : get_val())
  for (unsigned long long it = 2000; it >= 600; it -= 400) {
    aa += 1;
  }

  #pragma omp target parallel for simd if(target: n>10)
  for (short it = 6; it <= 20; it -= -4) {
    a += 1;
    aa += 1;
  }

  // We capture 3 VLA sizes in this target region
  // The names below are not necessarily consistent with the names used for the
  // addresses above as some are repeated.
  #pragma omp target parallel for simd if(target: n>20) schedule(static, a)
  for (unsigned char it = 'z'; it >= 'a'; it += -1) {
    a += 1;
    b[2] += 1.0;
    bn[3] += 1.0;
    c[1][2] += 1.0;
    cn[1][3] += 1.0;
    d.X += 1;
    d.Y += 1;
  }

  return a;
}

// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions in foo().

// Create stack storage and store argument in there.

// Create stack storage and store argument in there.

// Create stack storage and store argument in there.

// Create local storage for each capture.

// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
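// For reference, the shape being validated below is the usual libomp outlined
// entry (a sketch; the trailing capture parameters vary per target region):
//   define internal void @.omp_outlined.(i32* noalias %.global_tid.,
//                                        i32* noalias %.bound_tid.
//                                        /* , one parameter per capture */)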
template <typename tx>
tx ftemplate(int n) {
  tx a = 0;
  short aa = 0;
  tx b[10];

  #pragma omp target parallel for simd if(target: n>40)
  for (long long i = -10; i < 10; i += 3) {
    a += 1;
    aa += 1;
    b[2] += 1;
  }

  return a;
}

static int fstatic(int n) {
  int a = 0;
  short aa = 0;
  char aaa = 0;
  int b[10];

  #pragma omp target parallel for simd if(target: n>50)
  for (unsigned i = 100; i < 10; i += 10) {
    a += 1;
    aa += 1;
    aaa += 1;
    b[2] += 1;
  }

  return a;
}

struct S1 {
  double a;

  int r1(int n) {
    int b = n + 1;
    short int c[2][n];

#ifdef OMP5
    #pragma omp target parallel for simd if(n>60) nontemporal(a)
#else
    #pragma omp target parallel for simd if(target: n>60)
#endif // OMP5
    for (unsigned long long it = 2000; it >= 600; it -= 400) {
      this->a = (double)b + 1.5;
      c[1][1] = ++a;
    }

    return c[1][1] + (int)b;
  }
};

int bar(int n) {
  int a = 0;
  a += foo(n);
  S1 S;
  a += S.r1(n);
  a += fstatic(n);
  a += ftemplate<int>(n);
  return a;
}

// We capture 2 VLA sizes in this target region
// The names below are not necessarily consistent with the names used for the
// addresses above as some are repeated.

// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions of the callees of bar().

// Create local storage for each capture.
// Store captures in the context.
// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.

// Create local storage for each capture.
// Store captures in the context.
// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.

// Create local storage for each capture.
// Store captures in the context.
// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.

#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: ret i64 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK1-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK1-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A_CASTED16:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]])
// CHECK1-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK1-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, i64* [[K]], align 8
// CHECK1-NEXT: store i64 [[TMP13]], i64* [[K_CASTED]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP15]], i16* [[CONV2]], align 2
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
// CHECK1-NEXT: store i32 [[TMP19]], i32* [[CONV5]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK1-NEXT: store i64 [[TMP16]], i64* [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
// CHECK1-NEXT: store i64 [[TMP16]], i64* [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK1-NEXT: store i64 [[TMP18]], i64* [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK1-NEXT: store i64 [[TMP18]], i64* [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK1-NEXT: store i64 [[TMP20]], i64* [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
// CHECK1-NEXT: store i64 [[TMP20]], i64* [[TMP34]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP38]], align 4
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 3, i32* [[TMP39]], align 4
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP36]], i8*** [[TMP40]], align 8
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP37]], i8*** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP43]], align 8
// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP44]], align 8
// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP45]], align 8
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP46]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0
// CHECK1-NEXT: br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
// CHECK1-NEXT: store i32 [[TMP49]], i32* [[CONV7]], align 4
// CHECK1-NEXT: [[TMP50:%.*]] = load i64, i64* [[A_CASTED6]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
// CHECK1-NEXT: store i16 [[TMP51]], i16* [[CONV9]], align 2
// CHECK1-NEXT: [[TMP52:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP53]], 10
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
// CHECK1-NEXT: store i64 [[TMP50]], i64* [[TMP55]], align 8
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to i64*
// CHECK1-NEXT: store i64 [[TMP50]], i64* [[TMP57]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP58]], align 8
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
// CHECK1-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i64*
// CHECK1-NEXT: store i64 [[TMP52]], i64* [[TMP60]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
// CHECK1-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
// CHECK1-NEXT: store i64 [[TMP52]], i64* [[TMP62]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP63]], align 8
// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS13:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP66]], align 4
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 1
// CHECK1-NEXT: store i32 2, i32* [[TMP67]], align 4
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP64]], i8*** [[TMP68]], align 8
// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP65]], i8*** [[TMP69]], align 8
// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP70]], align 8
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP71]], align 8
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP72]], align 8
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP73]], align 8
// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP74]], align 8
// CHECK1-NEXT: [[TMP75:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]])
// CHECK1-NEXT: [[TMP76:%.*]] = icmp ne i32 [[TMP75]], 0
// CHECK1-NEXT: br i1 [[TMP76]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK1: omp_offload.failed14:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP50]], i64 [[TMP52]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK1: omp_offload.cont15:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP50]], i64 [[TMP52]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP77:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: store i32 [[TMP77]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP78:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV17:%.*]] = bitcast i64* [[A_CASTED16]] to i32*
// CHECK1-NEXT: store i32 [[TMP78]], i32* [[CONV17]], align 4
// CHECK1-NEXT: [[TMP79:%.*]] = load i64, i64* [[A_CASTED16]], align 8
// CHECK1-NEXT: [[TMP80:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CONV18:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP80]], i32* [[CONV18]], align 4
// CHECK1-NEXT: [[TMP81:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT: [[TMP82:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP19:%.*]] = icmp sgt i32 [[TMP82]], 20
// CHECK1-NEXT: br i1 [[CMP19]], label [[OMP_IF_THEN20:%.*]], label [[OMP_IF_ELSE27:%.*]]
// CHECK1: omp_if.then20:
// CHECK1-NEXT: [[TMP83:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK1-NEXT: [[TMP84:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT: [[TMP85:%.*]] = mul nuw i64 [[TMP84]], 8
// CHECK1-NEXT: [[TMP86:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP86]], i8* align 8 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i64 80, i1 false)
// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK1-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK1-NEXT: store i64 [[TMP79]], i64* [[TMP88]], align 8
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK1-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK1-NEXT: store i64 [[TMP79]], i64* [[TMP90]], align 8
// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP91]], align 8
// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
// CHECK1-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [10 x float]**
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP93]], align 8
// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
// CHECK1-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]**
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP95]], align 8
// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP96]], align 8
// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2
// CHECK1-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP98]], align 8
// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2
// CHECK1-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP100]], align 8
// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP101]], align 8
// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3
// CHECK1-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to float**
// CHECK1-NEXT: store float* [[VLA]], float** [[TMP103]], align 8
// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3
// CHECK1-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to float**
// CHECK1-NEXT: store float* [[VLA]], float** [[TMP105]], align 8
// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK1-NEXT: store i64 [[TMP83]], i64* [[TMP106]], align 8
// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP107]], align 8
// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4
// CHECK1-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to [5 x [10 x double]]**
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP109]], align 8
// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4
// CHECK1-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to [5 x [10 x double]]**
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP111]], align 8
// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 4
// CHECK1-NEXT: store i8* null, i8** [[TMP112]], align 8
// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5
// CHECK1-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
// CHECK1-NEXT: store i64 5, i64* [[TMP114]], align 8
// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5
// CHECK1-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i64*
// CHECK1-NEXT: store i64 5, i64* [[TMP116]], align 8
// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 5
// CHECK1-NEXT: store i8* null, i8** [[TMP117]], align 8
// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6
// CHECK1-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP119]], align 8
// CHECK1-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6
// CHECK1-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP121]], align 8
// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 6
// CHECK1-NEXT: store i8* null, i8** [[TMP122]], align 8
// CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7
// CHECK1-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP124]], align 8
// CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7
// CHECK1-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to double**
// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP126]], align 8
// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK1-NEXT: store i64 [[TMP85]], i64* [[TMP127]], align 8
// CHECK1-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 7
// CHECK1-NEXT: store i8* null, i8** [[TMP128]], align 8
// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8
// CHECK1-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT**
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 8
// CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8
// CHECK1-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to %struct.TT**
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP132]], align 8
// CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 8
// CHECK1-NEXT: store i8* null, i8** [[TMP133]], align 8
// CHECK1-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 9
// CHECK1-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i64*
// CHECK1-NEXT: store i64 [[TMP81]], i64* [[TMP135]], align 8
// CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 9
// CHECK1-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64*
// CHECK1-NEXT: store i64 [[TMP81]], i64* [[TMP137]], align 8
// CHECK1-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 9
// CHECK1-NEXT: store i8* null, i8** [[TMP138]], align 8
// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS24:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP142]], align 4
// CHECK1-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 1
// CHECK1-NEXT: store i32 10, i32* [[TMP143]], align 4
// CHECK1-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP139]], i8*** [[TMP144]], align 8
// CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP140]], i8*** [[TMP145]], align 8
// CHECK1-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 4
// CHECK1-NEXT: store i64* [[TMP141]], i64** [[TMP146]], align 8
// CHECK1-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP147]], align 8
// CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP148]], align 8
// CHECK1-NEXT: [[TMP149:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP149]], align 8
// CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP150]], align 8
// CHECK1-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]])
// CHECK1-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0
// CHECK1-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
// CHECK1: omp_offload.failed25:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP79]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP81]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT26]]
// CHECK1: omp_offload.cont26:
// CHECK1-NEXT: br label [[OMP_IF_END28:%.*]]
// CHECK1: omp_if.else27:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP79]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP81]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_IF_END28]]
// CHECK1: omp_if.end28:
// CHECK1-NEXT: [[TMP153:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP154]])
// CHECK1-NEXT: ret i32 [[TMP153]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK1-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK1-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 33, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK1-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[KERNEL_ARGS_I:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK1-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP11]], align 4, !noalias !25
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 1
// CHECK1-NEXT: store i32 0, i32* [[TMP12]], align 4, !noalias !25
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 2
// CHECK1-NEXT: store i8** null, i8*** [[TMP13]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 3
// CHECK1-NEXT: store i8** null, i8*** [[TMP14]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 4
// CHECK1-NEXT: store i64* null, i64** [[TMP15]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 5
// CHECK1-NEXT: store i64* null, i64** [[TMP16]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP17]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP18]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP19]], align 8, !noalias !25
// CHECK1-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel_nowait(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i8* null, i32 0, i8* null)
// CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK1-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK1: omp_offload.failed.i:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
// CHECK1-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK1: .omp_outlined..1.exit:
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
// CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK1-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
// CHECK1-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 35, i32 0, i32 8, i32 1, i32 1)
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]]
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK1-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK1-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 1, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0
// CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1: .omp.linear.pu:
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[K1]], align 8
// CHECK1-NEXT: store i64 [[TMP16]], i64* [[K_ADDR]], align 8
// CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK1: .omp.linear.pu.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108
// CHECK1-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64*
[[AA_CASTED]] to i16* // CHECK1-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4 // CHECK1-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4 // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[LIN4:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[A5:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK1-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 // CHECK1-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK1-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 // CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 
4 // CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) // CHECK1-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK1-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: // CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29:![0-9]+]] // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK1-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 // CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK1-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK1-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 // CHECK1-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 // CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK1-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] // CHECK1-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 // CHECK1-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 // CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 // CHECK1-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 // CHECK1-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // 
CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: // CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 // CHECK1-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK1-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: // CHECK1-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK1: .omp.final.done: // CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK1-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK1: .omp.linear.pu: // CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4 // CHECK1-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 4 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4 // CHECK1-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 4 // CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK1: .omp.linear.pu.done: // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 // CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[TMP:%.*]] = alloca i16, align 2 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[IT:%.*]] = alloca i16, align 2 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]] // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to 
i16 // CHECK1-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK1-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK1-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK1-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 // CHECK1-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 // CHECK1-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK1: omp.loop.exit: // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: // CHECK1-NEXT: store i16 22, i16* [[IT]], align 2 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK1: .omp.final.done: // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 // CHECK1-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 // CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 // CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], i64* 
[[VLA_ADDR2]], align 8 // CHECK1-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 // CHECK1-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 // CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK1-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 // CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4 // CHECK1-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 // CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 // CHECK1-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 // CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 // CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = 
alloca i64, align 8 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[IT:%.*]] = alloca i8, align 1 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 // CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK1-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 // CHECK1-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 // CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 // CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 // CHECK1-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4 // CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK1: omp.dispatch.cond: // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK1: cond.true: // CHECK1-NEXT: br label [[COND_END:%.*]] // CHECK1: cond.false: // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[COND_END]] // CHECK1: cond.end: // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK1-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, 
i32* [[DOTOMP_IV]], align 4 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK1: omp.dispatch.body: // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK1: omp.inner.for.cond: // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK1: omp.inner.for.body: // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK1-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 // CHECK1-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 // CHECK1-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double // CHECK1-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 // CHECK1-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float // CHECK1-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 // CHECK1-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double // CHECK1-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 // CHECK1-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float // CHECK1-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 // CHECK1-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 // CHECK1-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK1-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK1-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] // CHECK1-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 // CHECK1-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK1-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], 
%struct.TT* [[TMP7]], i32 0, i32 0 // CHECK1-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK1-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 // CHECK1-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 // CHECK1-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 // CHECK1-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 // CHECK1-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK1: omp.body.continue: // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK1: omp.inner.for.inc: // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK1-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] // CHECK1: omp.inner.for.end: // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK1: omp.dispatch.inc: // CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK1-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 // CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK1-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK1-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK1: omp.dispatch.end: // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) // CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK1-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK1-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK1: .omp.final.then: // CHECK1-NEXT: store i8 96, i8* [[IT]], align 1 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK1: .omp.final.done: // CHECK1-NEXT: ret void // // // CHECK1-LABEL: define {{[^@]+}}@_Z3bari // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK1-NEXT: store i32 [[ADD]], i32* [[A]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 
[[TMP3]], [[CALL1]] // CHECK1-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK1-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK1-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 // CHECK1-NEXT: ret i32 [[TMP8]] // // // CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei // CHECK1-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 // CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK1-NEXT: store i32 [[ADD]], i32* [[B]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() // CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK1-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK1-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8* // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP10]], i8* align 8 bitcast ([5 x i64]* @.offload_sizes.11 to i8*), i64 40, i1 false) // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to %struct.S1** // CHECK1-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP12]], align 8 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to double** // CHECK1-NEXT: store double* [[A]], double** [[TMP14]], align 8 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 // CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* // CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* // CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 // CHECK1-NEXT: store i8* null, i8** [[TMP20]], align 8 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 // CHECK1-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* // CHECK1-NEXT: store i64 2, i64* [[TMP22]], align 8 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK1-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* // CHECK1-NEXT: store i64 2, i64* [[TMP24]], align 8 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 // CHECK1-NEXT: store i8* null, i8** [[TMP25]], align 8 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 // CHECK1-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP27]], align 8 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 // CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 // CHECK1-NEXT: store i8* null, i8** [[TMP30]], align 8 // CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 // CHECK1-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i16** // CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP32]], align 8 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 // CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** // CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 8 // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 // CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP35]], align 8 // CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 // CHECK1-NEXT: store i8* null, i8** [[TMP36]], align 8 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 // CHECK1-NEXT: store i32 1, i32* [[TMP40]], align 4 // CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 // CHECK1-NEXT: store i32 5, i32* [[TMP41]], align 4 // CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 // CHECK1-NEXT: store i8** [[TMP37]], i8*** [[TMP42]], align 8 // CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 // CHECK1-NEXT: store i8** [[TMP38]], i8*** [[TMP43]], align 8 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 // CHECK1-NEXT: store i64* [[TMP39]], i64** [[TMP44]], align 8 // CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 // CHECK1-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP45]], align 8 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 // CHECK1-NEXT: store i8** null, i8*** [[TMP46]], align 8 // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 // CHECK1-NEXT: store i8** null, i8*** [[TMP47]], align 8 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 // CHECK1-NEXT: store i64 0, i64* [[TMP48]], align 8 // CHECK1-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) // CHECK1-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 // CHECK1-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK1: omp_offload.cont: // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] // CHECK1: omp_if.else: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: // CHECK1-NEXT: [[TMP51:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP51]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 // CHECK1-NEXT: [[TMP52:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 // CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP52]] to i32 // CHECK1-NEXT: [[TMP53:%.*]] = load i32, i32* [[B]], 
align 4 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP53]] // CHECK1-NEXT: [[TMP54:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 // CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP54]]) // CHECK1-NEXT: ret i32 [[ADD4]] // // // CHECK1-LABEL: define {{[^@]+}}@_ZL7fstatici // CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK1-NEXT: [[AAA:%.*]] = alloca i8, align 1 // CHECK1-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 // CHECK1-NEXT: store i16 0, i16* [[AA]], align 2 // CHECK1-NEXT: store i8 0, i8* [[AAA]], align 1 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK1-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK1-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK1: omp_if.then: // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64* // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64* // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 // CHECK1-NEXT: store i8* null, i8** [[TMP11]], align 8 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 // CHECK1-NEXT: store i8* null, i8** [[TMP16]], align 8 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], 
[4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP29]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 4, i32* [[TMP30]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP27]], i8*** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP28]], i8*** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP33]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP34]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP36]], align 8
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: ret i32 [[TMP40]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK1-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP22]], align 4
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 3, i32* [[TMP23]], align 4
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP20]], i8*** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP21]], i8*** [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64** [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i64** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK1-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: ret i32 [[TMP33]]
//
//
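// The [[KERNEL_ARGS]] checks above all follow one pattern: the host stub
// fills a %struct.__tgt_kernel_arguments record and hands it to
// @__tgt_target_kernel. A rough sketch of the layout implied by the stores
// (the field names below are illustrative guesses, not taken from this file):
//
//   struct __tgt_kernel_arguments {
//     int32_t Version;      // index 0: always 1 in these checks
//     int32_t NumArgs;      // index 1: 4 for fstatic, 3 for ftemplate
//     void **ArgBasePtrs;   // index 2: .offload_baseptrs
//     void **ArgPtrs;       // index 3: .offload_ptrs
//     int64_t *ArgSizes;    // index 4: @.offload_sizes.N
//     int64_t *ArgTypes;    // index 5: @.offload_maptypes.N
//     void **ArgNames;      // index 6: null here
//     void **ArgMappers;    // index 7: null here
//     int64_t Tripcount;    // index 8: 0, i.e. no known trip count
//   };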
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK1-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38:![0-9]+]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK1-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: store double [[ADD]], double* [[A]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK1-NEXT: store double [[INC]], double* [[A5]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16
// CHECK1-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK1-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1
// CHECK1-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: ret void
//
//
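// Worked arithmetic for the .omp_outlined..10 checks above: the normalized
// IV runs over [0, 3] (unsigned, per __kmpc_for_static_init_8u), so
// it = 2000 - 400*iv visits 2000, 1600, 1200, 800, and the final store of
// 400 is the counter value after the last decrement (800 - 400). This is
// consistent with a source loop like
//   for (unsigned long long it = 2000; it >= 600; it -= 400)
// (an assumption; the actual loop is in the source section of this file).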
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
// CHECK1-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK1-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK1-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP41:![0-9]+]]
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK1-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK1-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK1-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP41]]
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i64 11, i64* [[I]], align 8
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: ret void
//
//
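// Same normalization in .omp_outlined..16 above: the signed IV runs over
// [0, 6] (seven iterations), i = -10 + 3*iv visits -10, -7, -4, -1, 2, 5, 8,
// and the final store of 11 is -10 + 3*7, the counter value after the last
// iteration. A plausible source loop (an assumption) is
//   for (long long i = -10; i < 10; i += 3)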
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR8:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: ret i64 0
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK3-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK3-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK3-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED12:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [10 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [10 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [10 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]])
// CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK3-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]]
// CHECK3-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK3-NEXT: store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP15]], i32* [[A_CASTED2]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK3-NEXT: store i32 [[TMP14]], i32* [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK3-NEXT: store i32 [[TMP14]], i32* [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK3-NEXT: store i32 [[TMP16]], i32* [[TMP28]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
// CHECK3-NEXT: store i32 [[TMP16]], i32* [[TMP30]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP34]], align 4
// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 3, i32* [[TMP35]], align 4
// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP32]], i8*** [[TMP36]], align 4
// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP33]], i8*** [[TMP37]], align 4
// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP38]], align 4
// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP39]], align 4
// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP40]], align 4
// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP41]], align 4
// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP42]], align 8
// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: [[TMP45:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP45]], i32* [[A_CASTED3]], align 4
// CHECK3-NEXT: [[TMP46:%.*]] = load i32, i32* [[A_CASTED3]], align 4
// CHECK3-NEXT: [[TMP47:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
// CHECK3-NEXT: store i16 [[TMP47]], i16* [[CONV5]], align 2
// CHECK3-NEXT: [[TMP48:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
// CHECK3-NEXT: [[TMP49:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP49]], 10
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32*
// CHECK3-NEXT: store i32 [[TMP46]], i32* [[TMP51]], align 4
// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
// CHECK3-NEXT: store i32 [[TMP46]], i32* [[TMP53]], align 4
// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK3-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
// CHECK3-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32*
// CHECK3-NEXT: store i32 [[TMP48]], i32* [[TMP56]], align 4
// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32*
// CHECK3-NEXT: store i32 [[TMP48]], i32* [[TMP58]], align 4
// CHECK3-NEXT: [[TMP59:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP59]], align 4
// CHECK3-NEXT: [[TMP60:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS9:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK3-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP62]], align 4
// CHECK3-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 1
// CHECK3-NEXT: store i32 2, i32* [[TMP63]], align 4
// CHECK3-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP60]], i8*** [[TMP64]], align 4
// CHECK3-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP61]], i8*** [[TMP65]], align 4
// CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP66]], align 4
// CHECK3-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP67]], align 4
// CHECK3-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP68]], align 4
// CHECK3-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP69]], align 4
// CHECK3-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP70]], align 8
// CHECK3-NEXT: [[TMP71:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]])
// CHECK3-NEXT: [[TMP72:%.*]] = icmp ne i32 [[TMP71]], 0
// CHECK3-NEXT: br i1 [[TMP72]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
// CHECK3: omp_offload.failed10:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP46]], i32 [[TMP48]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT11]]
// CHECK3: omp_offload.cont11:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP46]], i32 [[TMP48]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP73:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP74:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP74]], i32* [[A_CASTED12]], align 4
// CHECK3-NEXT: [[TMP75:%.*]] = load i32, i32* [[A_CASTED12]], align 4
// CHECK3-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: store i32 [[TMP76]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK3-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK3-NEXT: [[TMP78:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP13:%.*]] = icmp sgt i32 [[TMP78]], 20
// CHECK3-NEXT: br i1 [[CMP13]], label [[OMP_IF_THEN14:%.*]], label [[OMP_IF_ELSE21:%.*]]
// CHECK3: omp_if.then14:
// CHECK3-NEXT: [[TMP79:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK3-NEXT: [[TMP80:%.*]] = sext i32 [[TMP79]] to i64
// CHECK3-NEXT: [[TMP81:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK3-NEXT: [[TMP82:%.*]] = mul nuw i32 [[TMP81]], 8
// CHECK3-NEXT: [[TMP83:%.*]] = sext i32 [[TMP82]] to i64
// CHECK3-NEXT: [[TMP84:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP84]], i8* align 4 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i32 80, i1 false)
// CHECK3-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
// CHECK3-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32*
// CHECK3-NEXT: store i32 [[TMP75]], i32* [[TMP86]], align 4
// CHECK3-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
// CHECK3-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32*
// CHECK3-NEXT: store i32 [[TMP75]], i32* [[TMP88]], align 4
// CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP89]], align 4
// CHECK3-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 1
// CHECK3-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [10 x float]**
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP91]], align 4
// CHECK3-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 1
// CHECK3-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [10 x float]**
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP93]], align 4
// CHECK3-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP94]], align 4
// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 2
// CHECK3-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP96]], align 4
// CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 2
// CHECK3-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP98]], align 4
// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP99]], align 4
// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 3
// CHECK3-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to float**
// CHECK3-NEXT: store float* [[VLA]], float** [[TMP101]], align 4
// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 3
// CHECK3-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to float**
// CHECK3-NEXT: store float* [[VLA]], float** [[TMP103]], align 4
// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK3-NEXT: store i64 [[TMP80]], i64* [[TMP104]], align 4
// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 3
// CHECK3-NEXT: store i8* null, i8** [[TMP105]], align 4
// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 4
// CHECK3-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [5 x [10 x double]]**
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP107]], align 4
// CHECK3-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 4
// CHECK3-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to [5 x [10 x double]]**
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP109]], align 4
// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 4
// CHECK3-NEXT: store i8* null, i8** [[TMP110]], align 4
// CHECK3-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 5
// CHECK3-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
// CHECK3-NEXT: store i32 5, i32* [[TMP112]], align 4
// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 5
// CHECK3-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
// CHECK3-NEXT: store i32 5, i32* [[TMP114]], align 4
// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 5
// CHECK3-NEXT: store i8* null, i8** [[TMP115]], align 4
// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 6
// CHECK3-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP117]], align 4
// CHECK3-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 6
// CHECK3-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP119]], align 4
// CHECK3-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 6
// CHECK3-NEXT: store i8* null, i8** [[TMP120]], align 4
// CHECK3-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 7
// CHECK3-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP122]], align 4
// CHECK3-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 7
// CHECK3-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP124]], align 4
// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK3-NEXT: store i64 [[TMP83]], i64* [[TMP125]], align 4
// CHECK3-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 7
// CHECK3-NEXT: store i8* null, i8** [[TMP126]], align 4
// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 8
// CHECK3-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to %struct.TT**
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP128]], align 4
// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 8
// CHECK3-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT**
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 4
// CHECK3-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 8
// CHECK3-NEXT: store i8* null, i8** [[TMP131]], align 4
// CHECK3-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 9
// CHECK3-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32*
// CHECK3-NEXT: store i32 [[TMP77]], i32* [[TMP133]], align 4
// CHECK3-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 9
// CHECK3-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32*
// CHECK3-NEXT: store i32 [[TMP77]], i32* [[TMP135]], align 4
// CHECK3-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 9
// CHECK3-NEXT: store i8* null, i8** [[TMP136]], align 4
// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
// CHECK3-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
// CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS18:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK3-NEXT: [[TMP140:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP140]], align 4
// CHECK3-NEXT: [[TMP141:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 1
// CHECK3-NEXT: store i32 10, i32* [[TMP141]], align 4
// CHECK3-NEXT: [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP137]], i8*** [[TMP142]], align 4
// CHECK3-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP138]], i8*** [[TMP143]], align 4
// CHECK3-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 4
// CHECK3-NEXT: store i64* [[TMP139]], i64** [[TMP144]], align 4
// CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP145]], align 4
// CHECK3-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP146]], align 4
// CHECK3-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP147]], align 4
// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP148]], align 8
// CHECK3-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]])
// CHECK3-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0
// CHECK3-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
// CHECK3: omp_offload.failed19:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP75]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP77]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT20]]
// CHECK3: omp_offload.cont20:
// CHECK3-NEXT: br label [[OMP_IF_END22:%.*]]
// CHECK3: omp_if.else21:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP75]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP77]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_IF_END22]]
// CHECK3: omp_if.end22:
// CHECK3-NEXT: [[TMP151:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP152]])
// CHECK3-NEXT: ret i32 [[TMP151]]
//
//
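// On the i386 target the two VLA sizes cannot live in a constant array:
// the checks above compute n*4 (float VLA) and 5*n*8 (double VLA) as i32,
// sign-extend them to i64, and patch them into slots 3 and 7 of the
// runtime copy [[DOTOFFLOAD_SIZES]] of @.offload_sizes.8 before passing its
// address in the kernel-arguments record. For example, with n == 10 that
// is 40 and 400 bytes respectively.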
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK3-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]]
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]]
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK3-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i32 33, i32* [[I]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: ret void
//
//
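// The .omp_task_entry. checks that follow cover the nowait target region
// allocated earlier with @__kmpc_omp_target_task_alloc: the proxy task
// unpacks the kmp_task_t payload, builds an all-null kernel-arguments
// record (this region captures nothing), calls @__tgt_target_kernel_nowait,
// and on failure falls back to the host version of the l96 region.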
i32* [[TMP11]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 1 // CHECK3-NEXT: store i32 0, i32* [[TMP12]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 2 // CHECK3-NEXT: store i8** null, i8*** [[TMP13]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 3 // CHECK3-NEXT: store i8** null, i8*** [[TMP14]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 4 // CHECK3-NEXT: store i64* null, i64** [[TMP15]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 5 // CHECK3-NEXT: store i64* null, i64** [[TMP16]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 6 // CHECK3-NEXT: store i8** null, i8*** [[TMP17]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 7 // CHECK3-NEXT: store i8** null, i8*** [[TMP18]], align 4, !noalias !26 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 8 // CHECK3-NEXT: store i64 0, i64* [[TMP19]], align 8, !noalias !26 // CHECK3-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel_nowait(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i8* null, i32 0, i8* null) // CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK3-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK3: omp_offload.failed.i: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]] // CHECK3-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK3: .omp_outlined..1.exit: // CHECK3-NEXT: ret i32 0 // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 // CHECK3-SAME: (i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]]) // CHECK3-NEXT: ret void // // // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[K1:%.*]] = alloca i64, align 8 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8 // CHECK3-NEXT: store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 // CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK3-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 35, i32 0, i32 8, i32 1, i32 1) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: // CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]]) // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]] // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw 
i32 [[TMP8]], 1 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] // CHECK3-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3 // CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]] // CHECK3-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK3: omp.dispatch.end: // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: // CHECK3-NEXT: store i32 1, i32* [[I]], align 4 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK3-NEXT: br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK3: .omp.linear.pu: // CHECK3-NEXT: [[TMP17:%.*]] = load i64, i64* [[K1]], align 8 // CHECK3-NEXT: store i64 [[TMP17]], i64* [[TMP0]], align 8 // CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK3: .omp.linear.pu.done: // CHECK3-NEXT: ret void // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 // CHECK3-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK3-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK3-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* 
[[LIN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK3-NEXT: ret void // // // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK3-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK3-NEXT: [[LIN2:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A3:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK3-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK3-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 // CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK3-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 // CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) // CHECK3-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK3-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK3-NEXT: br i1 
[[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: // CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK3-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: // CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30:![0-9]+]] // CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: // CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK3-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64 // CHECK3-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK3-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]] // CHECK3-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32 // CHECK3-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64 // CHECK3-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK3-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]] // CHECK3-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 // CHECK3-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32 // CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 // CHECK3-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16 // CHECK3-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: // CHECK3-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1 // CHECK3-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] // CHECK3: omp.inner.for.end: // 
CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK3-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: // CHECK3-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK3: .omp.final.done: // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK3-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK3: .omp.linear.pu: // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4 // CHECK3-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4 // CHECK3-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4 // CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK3: .omp.linear.pu.done: // CHECK3-NEXT: ret void // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 // CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) // CHECK3-NEXT: ret void // // // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i16, align 2 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[IT:%.*]] = alloca i16, align 2 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]] // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16 // CHECK3-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, 
!llvm.access.group [[ACC_GRP33]] // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK3-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK3-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32 // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 // CHECK3-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 // CHECK3-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK3-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK3: omp.loop.exit: // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: // CHECK3-NEXT: store i16 22, i16* [[IT]], align 2 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK3: .omp.final.done: // CHECK3-NEXT: ret void // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 // CHECK3-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 // CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 // CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK3-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 // CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store i32 [[VLA3]], 
i32* [[VLA_ADDR4]], align 4 // CHECK3-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 // CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 // CHECK3-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) // CHECK3-NEXT: ret void // // // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 // CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 // CHECK3-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 // CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 // CHECK3-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK3-NEXT: 
[[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[IT:%.*]] = alloca i8, align 1 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK3-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 // CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK3-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 // CHECK3-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 // CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 // CHECK3-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 // CHECK3-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK3-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK3: omp.dispatch.cond: // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK3: cond.true: // CHECK3-NEXT: br label [[COND_END:%.*]] // CHECK3: cond.false: // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[COND_END]] // CHECK3: cond.end: // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK3-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK3: omp.dispatch.body: // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK3: omp.inner.for.cond: // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], 
align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]] // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK3: omp.inner.for.body: // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK3-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2 // CHECK3-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK3-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK3-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK3-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 // CHECK3-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK3-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK3-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK3-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 // CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 // CHECK3-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK3-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK3-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] // CHECK3-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 // CHECK3-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK3-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 // CHECK3-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK3-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 // CHECK3-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, 
!llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK3-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK3-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 // CHECK3-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK3: omp.body.continue: // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK3: omp.inner.for.inc: // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK3-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] // CHECK3: omp.inner.for.end: // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK3: omp.dispatch.inc: // CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK3-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 // CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK3-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK3-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK3: omp.dispatch.end: // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) // CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK3-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK3-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK3: .omp.final.then: // CHECK3-NEXT: store i8 96, i8* [[IT]], align 1 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK3: .omp.final.done: // CHECK3-NEXT: ret void // // // CHECK3-LABEL: define {{[^@]+}}@_Z3bari // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK3-NEXT: store i32 [[ADD]], i32* [[A]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK3-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK3-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK3-NEXT: 
[[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 // CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK3-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 // CHECK3-NEXT: ret i32 [[TMP8]] // // // CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei // CHECK3-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 // CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK3-NEXT: store i32 [[ADD]], i32* [[B]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() // CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 // CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK3: omp_if.then: // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK3-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2 // CHECK3-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64 // CHECK3-NEXT: [[TMP10:%.*]] = bitcast [5 x i64]* [[DOTOFFLOAD_SIZES]] to i8* // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP10]], i8* align 4 bitcast ([5 x i64]* @.offload_sizes.11 to i8*), i32 40, i1 false) // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to %struct.S1** // CHECK3-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP12]], align 4 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to double** // CHECK3-NEXT: store double* [[A]], double** [[TMP14]], align 4 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 // CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 // CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 // CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* // CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 // CHECK3-NEXT: store i8* null, i8** [[TMP20]], align 4 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 // CHECK3-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* // CHECK3-NEXT: store i32 2, i32* [[TMP22]], align 4 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK3-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK3-NEXT: store i32 2, i32* [[TMP24]], align 4 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 // CHECK3-NEXT: store i8* null, i8** [[TMP25]], align 4 // CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 // CHECK3-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 // CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 // CHECK3-NEXT: store i8* null, i8** [[TMP30]], align 4 // CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 // CHECK3-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i16** // CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP32]], align 4 // CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 // CHECK3-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** // CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 4 // CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 // CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP35]], align 4 // CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 // CHECK3-NEXT: store i8* null, i8** [[TMP36]], align 4 // CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 // CHECK3-NEXT: store i32 1, i32* [[TMP40]], align 4 // CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 // CHECK3-NEXT: store i32 5, i32* 
[[TMP41]], align 4 // CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 // CHECK3-NEXT: store i8** [[TMP37]], i8*** [[TMP42]], align 4 // CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 // CHECK3-NEXT: store i8** [[TMP38]], i8*** [[TMP43]], align 4 // CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 // CHECK3-NEXT: store i64* [[TMP39]], i64** [[TMP44]], align 4 // CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 // CHECK3-NEXT: store i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP45]], align 4 // CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 // CHECK3-NEXT: store i8** null, i8*** [[TMP46]], align 4 // CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 // CHECK3-NEXT: store i8** null, i8*** [[TMP47]], align 4 // CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 // CHECK3-NEXT: store i64 0, i64* [[TMP48]], align 8 // CHECK3-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) // CHECK3-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 // CHECK3-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK3: omp_offload.cont: // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] // CHECK3: omp_if.else: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: // CHECK3-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP51]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 // CHECK3-NEXT: [[TMP52:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 // CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP52]] to i32 // CHECK3-NEXT: [[TMP53:%.*]] = load i32, i32* [[B]], align 4 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP53]] // CHECK3-NEXT: [[TMP54:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 // CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP54]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // // CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici // CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK3-NEXT: [[AAA:%.*]] = alloca i8, align 1 // CHECK3-NEXT: [[B:%.*]] = 
alloca [10 x i32], align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK3-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 4, i32* [[TMP30]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP27]], i8*** [[TMP31]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP28]], i8*** [[TMP32]], align 4
// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP33]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP34]], align 4
// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP35]], align 4
// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP36]], align 4
// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP37]], align 8
// CHECK3-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK3-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP40:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: ret i32 [[TMP40]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK3-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP22]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 3, i32* [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP20]], i8*** [[TMP24]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP21]], i8*** [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64** [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i64** [[TMP27]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP28]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP30]], align 8
// CHECK3-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK3-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP33:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: ret i32 [[TMP33]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216
// CHECK3-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39:![0-9]+]]
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK3-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: store double [[ADD]], double* [[A]], align 4, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK3-NEXT: store double [[INC]], double* [[A4]], align 4, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK3-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK3-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK3-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]]
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK3-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK3-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1
// CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK3-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK3-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42:![0-9]+]]
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK3-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK3-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK3-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK3-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP42]]
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i64 11, i64* [[I]], align 8
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR8:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: ret i64 0
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK5-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK5-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK5-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK5-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
// CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[A_CASTED16:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [10 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [10 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [10 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT: store i32 0, i32* [[A]], align 4
// CHECK5-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK5-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK5-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK5-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK5-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK5-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK5-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 40, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.kmp_task_t_with_privates*
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP8]], i32 0, i32 0
// CHECK5-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP7]])
// CHECK5-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK5-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP11]], i32* [[CONV]], align 4
// CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT: [[TMP13:%.*]] = load i64, i64* [[K]], align 8
// CHECK5-NEXT: store i64 [[TMP13]], i64* [[K_CASTED]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i64 [[TMP12]], i64 [[TMP14]]) #[[ATTR4:[0-9]+]]
// CHECK5-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK5-NEXT: [[TMP15:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT: store i16 [[TMP15]], i16* [[CONV2]], align 2
// CHECK5-NEXT: [[TMP16:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT: [[TMP17:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP17]], i32* [[CONV3]], align 4
// CHECK5-NEXT: [[TMP18:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
// CHECK5-NEXT: store i32 [[TMP19]], i32* [[CONV5]], align 4
// CHECK5-NEXT: [[TMP20:%.*]] = load i64, i64* [[A_CASTED4]], align 8
// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK5-NEXT: store i64 [[TMP16]], i64* [[TMP22]], align 8
// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64*
// CHECK5-NEXT: store i64 [[TMP16]], i64* [[TMP24]], align 8
// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP25]], align 8
// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK5-NEXT: store i64 [[TMP18]], i64* [[TMP27]], align 8
// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK5-NEXT: store i64 [[TMP18]], i64* [[TMP29]], align 8
// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT: store i8* null, i8** [[TMP30]], align 8
// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK5-NEXT: store i64 [[TMP20]], i64* [[TMP32]], align 8
// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64*
// CHECK5-NEXT: store i64 [[TMP20]], i64* [[TMP34]], align 8
// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT: store i8* null, i8** [[TMP35]], align 8
// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK5-NEXT: store i32 1, i32* [[TMP38]], align 4
// CHECK5-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK5-NEXT: store i32 3, i32* [[TMP39]], align 4
// CHECK5-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK5-NEXT: store i8** [[TMP36]], i8*** [[TMP40]], align 8
// CHECK5-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK5-NEXT: store i8** [[TMP37]], i8*** [[TMP41]], align 8
// CHECK5-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK5-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP42]], align 8
// CHECK5-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK5-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP43]], align 8
// CHECK5-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK5-NEXT: store i8** null, i8*** [[TMP44]], align 8
// CHECK5-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK5-NEXT: store i8** null, i8*** [[TMP45]], align 8
// CHECK5-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK5-NEXT: store i64 0, i64* [[TMP46]], align 8
// CHECK5-NEXT: [[TMP47:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0
// CHECK5-NEXT: br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
// CHECK5-NEXT: store i32 [[TMP49]], i32* [[CONV7]], align 4
// CHECK5-NEXT: [[TMP50:%.*]] = load i64, i64* [[A_CASTED6]], align 8
// CHECK5-NEXT: [[TMP51:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
// CHECK5-NEXT: store i16 [[TMP51]], i16* [[CONV9]], align 2
// CHECK5-NEXT: [[TMP52:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
// CHECK5-NEXT: [[TMP53:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP53]], 10
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5: omp_if.then:
// CHECK5-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK5-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i64*
// CHECK5-NEXT: store i64 [[TMP50]], i64* [[TMP55]], align 8
// CHECK5-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK5-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to i64*
// CHECK5-NEXT: store i64 [[TMP50]], i64* [[TMP57]], align 8
// CHECK5-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP58]], align 8
// CHECK5-NEXT: [[TMP59:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
// CHECK5-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i64*
// CHECK5-NEXT: store i64 [[TMP52]], i64* [[TMP60]], align 8
// CHECK5-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
// CHECK5-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64*
// CHECK5-NEXT: store i64 [[TMP52]], i64* [[TMP62]], align 8
// CHECK5-NEXT: [[TMP63:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
// CHECK5-NEXT: store i8* null, i8** [[TMP63]], align 8
// CHECK5-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK5-NEXT: [[TMP65:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK5-NEXT: [[KERNEL_ARGS13:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK5-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 0
// CHECK5-NEXT: store i32 1, i32* [[TMP66]], align 4
// CHECK5-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 1
// CHECK5-NEXT: store i32 2, i32* [[TMP67]], align 4
// CHECK5-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 2
// CHECK5-NEXT: store i8** [[TMP64]], i8*** [[TMP68]], align 8
// CHECK5-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 3
// CHECK5-NEXT: store i8** [[TMP65]], i8*** [[TMP69]], align 8
// CHECK5-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 4
// CHECK5-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP70]], align 8
// CHECK5-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 5
// CHECK5-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP71]], align 8
// CHECK5-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 6
// CHECK5-NEXT: store i8** null, i8*** [[TMP72]], align 8
// CHECK5-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 7
// CHECK5-NEXT: store i8** null, i8*** [[TMP73]], align 8
// CHECK5-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]], i32 0, i32 8
// CHECK5-NEXT: store i64 0, i64* [[TMP74]], align 8
// CHECK5-NEXT: [[TMP75:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS13]])
// CHECK5-NEXT: [[TMP76:%.*]] = icmp ne i32 [[TMP75]], 0
// CHECK5-NEXT: br i1 [[TMP76]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]]
// CHECK5: omp_offload.failed14:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP50]], i64 [[TMP52]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT15]]
// CHECK5: omp_offload.cont15:
// CHECK5-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK5: omp_if.else:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i64 [[TMP50]], i64 [[TMP52]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_IF_END]]
// CHECK5: omp_if.end:
// CHECK5-NEXT: [[TMP77:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: store i32 [[TMP77]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[TMP78:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[CONV17:%.*]] = bitcast i64* [[A_CASTED16]] to i32*
// CHECK5-NEXT: store i32 [[TMP78]], i32* [[CONV17]], align 4
// CHECK5-NEXT: [[TMP79:%.*]] = load i64, i64* [[A_CASTED16]], align 8
// CHECK5-NEXT: [[TMP80:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK5-NEXT: [[CONV18:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP80]], i32* [[CONV18]], align 4
// CHECK5-NEXT: [[TMP81:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK5-NEXT: [[TMP82:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[CMP19:%.*]] = icmp sgt i32 [[TMP82]], 20
// CHECK5-NEXT: br i1 [[CMP19]], label [[OMP_IF_THEN20:%.*]], label [[OMP_IF_ELSE27:%.*]]
// CHECK5: omp_if.then20:
// CHECK5-NEXT: [[TMP83:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK5-NEXT: [[TMP84:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK5-NEXT: [[TMP85:%.*]] = mul nuw i64 [[TMP84]], 8
// CHECK5-NEXT: [[TMP86:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP86]], i8* align 8 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i64 80, i1 false)
// CHECK5-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK5-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64*
// CHECK5-NEXT: store i64 [[TMP79]], i64* [[TMP88]], align 8
// CHECK5-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK5-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK5-NEXT: store i64 [[TMP79]], i64* [[TMP90]], align 8
// CHECK5-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP91]], align 8
// CHECK5-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1
// CHECK5-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [10 x float]**
// CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP93]], align 8
// CHECK5-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1
// CHECK5-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]**
// CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP95]], align 8
// CHECK5-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1
// CHECK5-NEXT: store i8* null, i8** [[TMP96]], align 8
// CHECK5-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2
// CHECK5-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64*
// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP98]], align 8
// CHECK5-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2
// CHECK5-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64*
// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP100]], align 8
// CHECK5-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2
// CHECK5-NEXT: store i8* null, i8** [[TMP101]], align 8
// CHECK5-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3
// CHECK5-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to float**
// CHECK5-NEXT: store float* [[VLA]], float** [[TMP103]], align 8
// CHECK5-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3
// CHECK5-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to float**
// CHECK5-NEXT: store float* [[VLA]], float** [[TMP105]], align 8
// CHECK5-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK5-NEXT: store i64 [[TMP83]], i64* [[TMP106]], align 8
// CHECK5-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3
// CHECK5-NEXT: store i8* null, i8** [[TMP107]], align 8
// CHECK5-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4
// CHECK5-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to [5 x [10 x double]]**
// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP109]], align 8
// CHECK5-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4
// CHECK5-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to [5 x [10 x double]]**
// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP111]], align 8
// CHECK5-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 4
// CHECK5-NEXT: store i8* null, i8** [[TMP112]], align 8
// CHECK5-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5
// CHECK5-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64*
// CHECK5-NEXT: store i64 5, i64* [[TMP114]], align 8
// CHECK5-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5
// CHECK5-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i64*
// CHECK5-NEXT: store i64 5, i64* [[TMP116]], align 8
// CHECK5-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 5
// CHECK5-NEXT: store i8* null, i8** [[TMP117]], align 8
// CHECK5-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6
// CHECK5-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i64*
// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP119]], align 8
// CHECK5-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6
// CHECK5-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64*
// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP121]], align 8
// CHECK5-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 6
// CHECK5-NEXT: store i8* null, i8** [[TMP122]], align 8
// CHECK5-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7
// CHECK5-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP124]], align 8
// CHECK5-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7
// CHECK5-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to double**
// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP126]], align 8
// CHECK5-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK5-NEXT: store i64 [[TMP85]], i64* [[TMP127]], align 8
// CHECK5-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 7
// CHECK5-NEXT: store i8* null, i8** [[TMP128]], align 8
// CHECK5-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8
// CHECK5-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT**
// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 8
// CHECK5-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8
// CHECK5-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to %struct.TT**
// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP132]], align 8
// CHECK5-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 8
// CHECK5-NEXT: store i8* null, i8** [[TMP133]], align 8
// CHECK5-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 9
// CHECK5-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i64*
// CHECK5-NEXT: store i64 [[TMP81]], i64* [[TMP135]], align 8
// CHECK5-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 9
// CHECK5-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64*
// CHECK5-NEXT: store i64 [[TMP81]], i64* [[TMP137]], align 8
// CHECK5-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 9
// CHECK5-NEXT: store i8* null, i8** [[TMP138]], align 8
// CHECK5-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0
// CHECK5-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0
// CHECK5-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK5-NEXT: [[KERNEL_ARGS24:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK5-NEXT: [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 0
// CHECK5-NEXT: store i32 1, i32* [[TMP142]], align 4
// CHECK5-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 1
// CHECK5-NEXT: store i32 10, i32* [[TMP143]], align 4
// CHECK5-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 2
// CHECK5-NEXT: store i8** [[TMP139]], i8*** [[TMP144]], align 8
// CHECK5-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 3
// CHECK5-NEXT: store i8** [[TMP140]], i8*** [[TMP145]], align 8
// CHECK5-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 4
// CHECK5-NEXT: store i64* [[TMP141]], i64** [[TMP146]], align 8
// CHECK5-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 5
// CHECK5-NEXT: store i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP147]], align 8
// CHECK5-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 6
// CHECK5-NEXT: store i8** null, i8*** [[TMP148]], align 8
// CHECK5-NEXT: [[TMP149:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 7
// CHECK5-NEXT: store i8** null, i8*** [[TMP149]], align 8
// CHECK5-NEXT: [[TMP150:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]], i32 0, i32 8
// CHECK5-NEXT: store i64 0, i64* [[TMP150]], align 8
// CHECK5-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS24]])
// CHECK5-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0
// CHECK5-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]]
// CHECK5: omp_offload.failed25:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP79]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP81]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT26]]
// CHECK5: omp_offload.cont26:
// CHECK5-NEXT: br label [[OMP_IF_END28:%.*]]
// CHECK5: omp_if.else27:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP79]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP81]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_IF_END28]]
// CHECK5: omp_if.end28:
// CHECK5-NEXT: [[TMP153:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP154]])
// CHECK5-NEXT: ret i32 [[TMP153]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK5-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10:![0-9]+]]
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK5-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK5-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK5-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK5-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5: .omp.final.then:
// CHECK5-NEXT: store i32 33, i32* [[I]], align 4
// CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK5: .omp.final.done:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK5-SAME: (i32 noundef signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK5-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK5-NEXT: [[KERNEL_ARGS_I:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK5-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK5-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK5-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK5-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK5-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !25
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 0
// CHECK5-NEXT: store i32 1, i32* [[TMP11]], align 4, !noalias !25
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 1
// CHECK5-NEXT: store i32 0, i32* [[TMP12]], align 4, !noalias !25
// CHECK5-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 2
// CHECK5-NEXT: store i8** null, i8*** [[TMP13]], align 8, !noalias !25
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 3
// CHECK5-NEXT: store i8** null, i8*** [[TMP14]], align 8, !noalias !25
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 4
// CHECK5-NEXT: store i64* null, i64** [[TMP15]], align 8, !noalias !25
// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 5
// CHECK5-NEXT: store i64* null, i64** [[TMP16]], align 8, !noalias !25
// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 6
// CHECK5-NEXT: store i8** null, i8*** [[TMP17]], align 8, !noalias !25
// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 7
// CHECK5-NEXT: store i8** null, i8*** [[TMP18]], align 8, !noalias !25
// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 8
// CHECK5-NEXT: store i64 0, i64* [[TMP19]], align 8, !noalias !25
// CHECK5-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel_nowait(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i8* null, i32 0, i8* null)
// CHECK5-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK5-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
// CHECK5: omp_offload.failed.i:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]]
// CHECK5-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]]
// CHECK5: .omp_outlined..1.exit:
// CHECK5-NEXT: ret i32 0
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101
// CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
//
CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8 // CHECK5-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) // CHECK5-NEXT: ret void // // // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[K:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[K1:%.*]] = alloca i64, align 8 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK5-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK5-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8 // CHECK5-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 // CHECK5-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]]) // CHECK5-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1073741859, i32 0, i32 8, i32 1, i32 1) // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK5: omp.dispatch.cond: // CHECK5-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]]) // CHECK5-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 // CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK5: omp.dispatch.body: // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26:![0-9]+]] // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: 
[[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] // CHECK5-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 // CHECK5-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]] // CHECK5-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK5-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: // CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK5-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK5: omp.dispatch.inc: // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK5: omp.dispatch.end: // CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 // CHECK5-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: // CHECK5-NEXT: store i32 1, i32* [[I]], align 4 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK5: .omp.final.done: // CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 // CHECK5-NEXT: br i1 [[TMP15]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK5: .omp.linear.pu: // CHECK5-NEXT: [[TMP16:%.*]] = load i64, i64* [[K1]], align 8 // CHECK5-NEXT: store i64 [[TMP16]], i64* [[K_ADDR]], align 8 // CHECK5-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK5: .omp.linear.pu.done: // CHECK5-NEXT: ret void // // // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 // CHECK5-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR2]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK5-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // 
CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* // CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK5-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK5-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4 // CHECK5-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* // CHECK5-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 // CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4 // CHECK5-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK5-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 // CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK5-NEXT: ret void // // // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[LIN4:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[A5:%.*]] = alloca i32, align 4 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK5-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* // CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4 // CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4 // CHECK5-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 // CHECK5-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK5-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 // CHECK5-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK5-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK5-NEXT: store i64 1, 
i64* [[DOTOMP_STRIDE]], align 8 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 // CHECK5-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) // CHECK5-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK5-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: // CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK5-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK5-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK5-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: // CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29:![0-9]+]] // CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK5-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: // CHECK5-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK5-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK5-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 // CHECK5-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK5-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK5-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 // CHECK5-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 // CHECK5-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK5-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] // CHECK5-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 // CHECK5-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 // CHECK5-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 // 
CHECK5-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 // CHECK5-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: // CHECK5-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 // CHECK5-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) // CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK5-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: // CHECK5-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK5: .omp.final.done: // CHECK5-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK5-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK5: .omp.linear.pu: // CHECK5-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4 // CHECK5-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 4 // CHECK5-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4 // CHECK5-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 4 // CHECK5-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK5: .omp.linear.pu.done: // CHECK5-NEXT: ret void // // // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 // CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR2]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK5-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2 // CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK5-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 // CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) // CHECK5-NEXT: ret void // // // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[TMP:%.*]] = alloca i16, align 2 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[IT:%.*]] = alloca i16, align 2 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32:![0-9]+]] // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK5-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to 
i16 // CHECK5-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK5-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK5-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK5-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 // CHECK5-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 // CHECK5-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK5-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK5-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK5: omp.loop.exit: // CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK5-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: // CHECK5-NEXT: store i16 22, i16* [[IT]], align 2 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK5: .omp.final.done: // CHECK5-NEXT: ret void // // // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 // CHECK5-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 // CHECK5-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 // CHECK5-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 // CHECK5-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 // CHECK5-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 // CHECK5-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 // CHECK5-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK5-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 // CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK5-NEXT: store i64 [[VLA1]], i64* 
[[VLA_ADDR2]], align 8 // CHECK5-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 // CHECK5-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 // CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 // CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK5-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 // CHECK5-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 // CHECK5-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 // CHECK5-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK5-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK5-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 // CHECK5-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4 // CHECK5-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK5-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 // CHECK5-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) // CHECK5-NEXT: ret void // // // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 // CHECK5-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 // CHECK5-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 // CHECK5-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 // CHECK5-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 // CHECK5-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = 
alloca i64, align 8 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[IT:%.*]] = alloca i8, align 1 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 // CHECK5-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK5-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 // CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK5-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK5-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 // CHECK5-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 // CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 // CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK5-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 // CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK5-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 // CHECK5-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 // CHECK5-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 // CHECK5-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 // CHECK5-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4 // CHECK5-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 // CHECK5-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK5-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK5: omp.dispatch.cond: // CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK5: cond.true: // CHECK5-NEXT: br label [[COND_END:%.*]] // CHECK5: cond.false: // CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: br label [[COND_END]] // CHECK5: cond.end: // CHECK5-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK5-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK5-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, 
i32* [[DOTOMP_IV]], align 4 // CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK5-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK5: omp.dispatch.body: // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK5: omp.inner.for.cond: // CHECK5-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35:![0-9]+]] // CHECK5-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK5-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK5: omp.inner.for.body: // CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK5-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK5-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 // CHECK5-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK5-NEXT: store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 // CHECK5-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double // CHECK5-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 // CHECK5-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float // CHECK5-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 // CHECK5-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double // CHECK5-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 // CHECK5-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float // CHECK5-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 // CHECK5-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 // CHECK5-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK5-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK5-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] // CHECK5-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 // CHECK5-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK5-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], 
%struct.TT* [[TMP7]], i32 0, i32 0 // CHECK5-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK5-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 // CHECK5-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 // CHECK5-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 // CHECK5-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 // CHECK5-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK5: omp.body.continue: // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK5: omp.inner.for.inc: // CHECK5-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK5-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP35]] // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] // CHECK5: omp.inner.for.end: // CHECK5-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK5: omp.dispatch.inc: // CHECK5-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK5-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK5-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 // CHECK5-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK5-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK5-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 // CHECK5-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK5: omp.dispatch.end: // CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) // CHECK5-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK5-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK5-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK5: .omp.final.then: // CHECK5-NEXT: store i8 96, i8* [[IT]], align 1 // CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK5: .omp.final.done: // CHECK5-NEXT: ret void // // // CHECK5-LABEL: define {{[^@]+}}@_Z3bari // CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK5-NEXT: store i32 0, i32* [[A]], align 4 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK5-NEXT: store i32 [[ADD]], i32* [[A]], align 4 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK5-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 
[[TMP3]], [[CALL1]] // CHECK5-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK5-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK5-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK5-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 // CHECK5-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK5-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 // CHECK5-NEXT: ret i32 [[TMP8]] // // // CHECK5-LABEL: define {{[^@]+}}@_ZN2S12r1Ei // CHECK5-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 // CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[B:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 // CHECK5-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 // CHECK5-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8 // CHECK5-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK5-NEXT: store i32 [[ADD]], i32* [[B]], align 4 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK5-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK5-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() // CHECK5-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 // CHECK5-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK5-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK5-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60 // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[B]], align 4 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK5-NEXT: store i32 [[TMP6]], i32* [[CONV]], align 4 // CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[B_CASTED]], align 8 // CHECK5-NEXT: [[TMP8:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK5-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1 // CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8* // CHECK5-NEXT: [[FROMBOOL3:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL3]], i8* [[CONV2]], align 1 // CHECK5-NEXT: [[TMP9:%.*]] = load i64, i64* 
[[DOTCAPTURE_EXPR__CASTED]], align 8 // CHECK5-NEXT: [[TMP10:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK5-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP10]] to i1 // CHECK5-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK5-NEXT: [[TMP11:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK5-NEXT: [[TMP12:%.*]] = mul nuw i64 [[TMP11]], 2 // CHECK5-NEXT: [[TMP13:%.*]] = bitcast [6 x i64]* [[DOTOFFLOAD_SIZES]] to i8* // CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP13]], i8* align 8 bitcast ([6 x i64]* @.offload_sizes.11 to i8*), i64 48, i1 false) // CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.S1** // CHECK5-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP15]], align 8 // CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to double** // CHECK5-NEXT: store double* [[A]], double** [[TMP17]], align 8 // CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 // CHECK5-NEXT: store i8* null, i8** [[TMP18]], align 8 // CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 // CHECK5-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* // CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP20]], align 8 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 // CHECK5-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* // CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP22]], align 8 // CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 // CHECK5-NEXT: store i8* null, i8** [[TMP23]], align 8 // CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 // CHECK5-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK5-NEXT: store i64 2, i64* [[TMP25]], align 8 // CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK5-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* // CHECK5-NEXT: store i64 2, i64* [[TMP27]], align 8 // CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 // CHECK5-NEXT: store i8* null, i8** [[TMP28]], align 8 // CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 // CHECK5-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* // CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP30]], align 8 // CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 // CHECK5-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64* // CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP32]], align 8 // CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 // CHECK5-NEXT: store i8* null, i8** [[TMP33]], align 8 // CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 // CHECK5-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** // CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP35]], 
align 8
// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK5-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8
// CHECK5-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK5-NEXT: store i64 [[TMP12]], i64* [[TMP38]], align 8
// CHECK5-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK5-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK5-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
// CHECK5-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64*
// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP41]], align 8
// CHECK5-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
// CHECK5-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP43]], align 8
// CHECK5-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5
// CHECK5-NEXT: store i8* null, i8** [[TMP44]], align 8
// CHECK5-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP46:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK5-NEXT: [[TMP48:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1
// CHECK5-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP48]] to i1
// CHECK5-NEXT: [[TMP49:%.*]] = select i1 [[TOBOOL5]], i32 0, i32 1
// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK5-NEXT: store i32 1, i32* [[TMP50]], align 4
// CHECK5-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK5-NEXT: store i32 6, i32* [[TMP51]], align 4
// CHECK5-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK5-NEXT: store i8** [[TMP45]], i8*** [[TMP52]], align 8
// CHECK5-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK5-NEXT: store i8** [[TMP46]], i8*** [[TMP53]], align 8
// CHECK5-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK5-NEXT: store i64* [[TMP47]], i64** [[TMP54]], align 8
// CHECK5-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK5-NEXT: store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP55]], align 8
// CHECK5-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK5-NEXT: store i8** null, i8*** [[TMP56]], align 8
// CHECK5-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK5-NEXT: store i8** null, i8*** [[TMP57]], align 8
// CHECK5-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK5-NEXT: store i64 0, i64* [[TMP58]], align 8
// CHECK5-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 [[TMP49]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0
// CHECK5-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK5: omp_if.else:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_IF_END]]
// CHECK5: omp_if.end:
// CHECK5-NEXT: [[TMP61:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP61]]
// CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK5-NEXT: [[TMP62:%.*]] = load i16, i16* [[ARRAYIDX6]], align 2
// CHECK5-NEXT: [[CONV7:%.*]] = sext i16 [[TMP62]] to i32
// CHECK5-NEXT: [[TMP63:%.*]] = load i32, i32* [[B]], align 4
// CHECK5-NEXT: [[ADD8:%.*]] = add nsw i32 [[CONV7]], [[TMP63]]
// CHECK5-NEXT: [[TMP64:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP64]])
// CHECK5-NEXT: ret i32 [[ADD8]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK5-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK5-NEXT: store i32 0, i32* [[A]], align 4
// CHECK5-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK5-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK5-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5: omp_if.then:
// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK5-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK5-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK5-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK5-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK5-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT: store i8* null, i8** [[TMP16]], align 8
// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK5-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK5-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK5-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK5-NEXT: store i32 1, i32* [[TMP29]], align 4
// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK5-NEXT: store i32 4, i32* [[TMP30]], align 4
// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK5-NEXT: store i8** [[TMP27]], i8*** [[TMP31]], align 8
// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK5-NEXT: store i8** [[TMP28]], i8*** [[TMP32]], align 8
// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK5-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP33]], align 8
// CHECK5-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK5-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP34]], align 8
// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK5-NEXT: store i8** null, i8*** [[TMP35]], align 8
// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK5-NEXT: store i8** null, i8*** [[TMP36]], align 8
// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK5-NEXT: store i64 0, i64* [[TMP37]], align 8
// CHECK5-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
// CHECK5-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK5: omp_if.else:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_IF_END]]
// CHECK5: omp_if.end:
// CHECK5-NEXT: [[TMP40:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: ret i32 [[TMP40]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK5-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK5-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
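//
// Note on the pattern verified above (a plain comment, not a FileCheck
// directive): when the offload launch fails, or the target construct's `if`
// clause is false, codegen invokes the host version of the outlined region
// directly, so `omp_offload.failed` and `omp_if.else` both call the same
// host entry. A minimal sketch of that control flow, with a hypothetical
// fallback name:
//
//   int rc = __tgt_target_kernel(/* loc, device, teams, threads, id, args */);
//   if (rc != 0)
//     host_fallback(/* same captured arguments */); // hypothetical host copy
//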
// CHECK5-NEXT: store i32 0, i32* [[A]], align 4
// CHECK5-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5: omp_if.then:
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK5-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK5-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK5-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK5-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK5-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK5-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK5-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK5-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK5-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK5-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK5-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK5-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK5-NEXT: store i32 1, i32* [[TMP22]], align 4
// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK5-NEXT: store i32 3, i32* [[TMP23]], align 4
// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK5-NEXT: store i8** [[TMP20]], i8*** [[TMP24]], align 8
// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK5-NEXT: store i8** [[TMP21]], i8*** [[TMP25]], align 8
// CHECK5-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK5-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64** [[TMP26]], align 8
// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK5-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i64** [[TMP27]], align 8
// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK5-NEXT: store i8** null, i8*** [[TMP28]], align 8
// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK5-NEXT: store i8** null, i8*** [[TMP29]], align 8
// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK5-NEXT: store i64 0, i64* [[TMP30]], align 8
// CHECK5-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK5-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK5-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK5: omp_offload.failed:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK5: omp_offload.cont:
// CHECK5-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK5: omp_if.else:
// CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK5-NEXT: br label [[OMP_IF_END]]
// CHECK5: omp_if.end:
// CHECK5-NEXT: [[TMP33:%.*]] = load i32, i32* [[A]], align 4
// CHECK5-NEXT: ret i32 [[TMP33]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214
// CHECK5-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK5-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK5-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK5-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK5-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP5]], i32* [[CONV4]], align 4
// CHECK5-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1
// CHECK5-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1
// CHECK5-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8*
// CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
// CHECK5-NEXT: store i8 [[FROMBOOL]], i8* [[CONV5]], align 1
// CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1
// CHECK5-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK5-NEXT: br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5: omp_if.then:
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]])
// CHECK5-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK5: omp_if.else:
// CHECK5-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK5-NEXT: call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR4]]
// CHECK5-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]])
// CHECK5-NEXT: br label [[OMP_IF_END]]
// CHECK5: omp_if.end:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK5-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK5-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK5-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK5-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8*
// CHECK5-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 1
// CHECK5-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1
// CHECK5-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK5: omp_if.then:
// CHECK5-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38:![0-9]+]]
// CHECK5-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]]
// CHECK5-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400
// CHECK5-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK5-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double
// CHECK5-NEXT: [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00
// CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT: store double [[ADD]], double* [[A]], align 8, !nontemporal !39, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT: [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !39, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00
// CHECK5-NEXT: store double [[INC]], double* [[A6]], align 8, !nontemporal !39, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: [[CONV7:%.*]] = fptosi double [[INC]] to i16
// CHECK5-NEXT: [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]]
// CHECK5-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK5-NEXT: store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: [[ADD9:%.*]] = add i64 [[TMP16]], 1
// CHECK5-NEXT: store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP38]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK5: omp_if.else:
// CHECK5-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK5-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3
// CHECK5-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
// CHECK5: cond.true11:
// CHECK5-NEXT: br label [[COND_END13:%.*]]
// CHECK5: cond.false12:
// CHECK5-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: br label [[COND_END13]]
// CHECK5: cond.end13:
// CHECK5-NEXT: [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ]
// CHECK5-NEXT: store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND15:%.*]]
// CHECK5: omp.inner.for.cond15:
// CHECK5-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK5-NEXT: br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]]
// CHECK5: omp.inner.for.body17:
// CHECK5-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT: [[MUL18:%.*]] = mul i64 [[TMP24]], 400
// CHECK5-NEXT: [[SUB19:%.*]] = sub i64 2000, [[MUL18]]
// CHECK5-NEXT: store i64 [[SUB19]], i64* [[IT]], align 8
// CHECK5-NEXT: [[TMP25:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK5-NEXT: [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double
// CHECK5-NEXT: [[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00
// CHECK5-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT: store double [[ADD21]], double* [[A22]], align 8
// CHECK5-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK5-NEXT: [[TMP26:%.*]] = load double, double* [[A23]], align 8
// CHECK5-NEXT: [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00
// CHECK5-NEXT: store double [[INC24]], double* [[A23]], align 8
// CHECK5-NEXT: [[CONV25:%.*]] = fptosi double [[INC24]] to i16
// CHECK5-NEXT: [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK5-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]]
// CHECK5-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1
// CHECK5-NEXT: store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE28:%.*]]
// CHECK5: omp.body.continue28:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC29:%.*]]
// CHECK5: omp.inner.for.inc29:
// CHECK5-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT: [[ADD30:%.*]] = add i64 [[TMP28]], 1
// CHECK5-NEXT: store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP42:![0-9]+]]
// CHECK5: omp.inner.for.end31:
// CHECK5-NEXT: br label [[OMP_IF_END]]
// CHECK5: omp_if.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]])
// CHECK5-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0
// CHECK5-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5: .omp.final.then:
// CHECK5-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK5: .omp.final.done:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195
// CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK5-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
// CHECK5-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1
// CHECK5-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK5-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK5-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK5-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178
// CHECK5-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK5-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4
// CHECK5-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2
// CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK5-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..16
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK5-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK5-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK5-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK5-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK5-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK5: cond.true:
// CHECK5-NEXT: br label [[COND_END:%.*]]
// CHECK5: cond.false:
// CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: br label [[COND_END]]
// CHECK5: cond.end:
// CHECK5-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK5-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK5-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP44:![0-9]+]]
// CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK5-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK5-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK5-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK5-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK5-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK5-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK5-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK5-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP44]]
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK5: omp.loop.exit:
// CHECK5-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK5-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK5-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK5: .omp.final.then:
// CHECK5-NEXT: store i64 11, i64* [[I]], align 8
// CHECK5-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK5: .omp.final.done:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK5-SAME: () #[[ATTR8:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK5-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: ret i64 0
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK7-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK7-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK7-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK7-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
// CHECK7-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK7-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
// CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[A_CASTED12:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [10 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [10 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [10 x i8*], align 4
// CHECK7-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[A]], align 4
// CHECK7-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK7-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK7-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK7-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK7-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 20, i32 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK7-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to %struct.kmp_task_t_with_privates*
// CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP6]], i32 0, i32 0
// CHECK7-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP5]])
// CHECK7-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK7-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP9]], i32* [[A_CASTED]], align 4
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101(i32 [[TMP10]], i64* [[K]]) #[[ATTR4:[0-9]+]]
// CHECK7-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = load i16, i16* [[AA]], align 2
// CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK7-NEXT: store i16 [[TMP11]], i16* [[CONV]], align 2
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK7-NEXT: store i32 [[TMP13]], i32* [[LIN_CASTED]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP15]], i32* [[A_CASTED2]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK7-NEXT: store i32 [[TMP12]], i32* [[TMP18]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK7-NEXT: store i32 [[TMP12]], i32* [[TMP20]], align 4
// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK7-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK7-NEXT: store i32 [[TMP14]], i32* [[TMP23]], align 4
// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK7-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK7-NEXT: store i32 [[TMP14]], i32* [[TMP25]], align 4
// CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK7-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK7-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK7-NEXT: store i32 [[TMP16]], i32* [[TMP28]], align 4
// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK7-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32*
// CHECK7-NEXT: store i32 [[TMP16]], i32* [[TMP30]], align 4
// CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK7-NEXT: store i8* null, i8** [[TMP31]], align 4
// CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK7-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK7-NEXT: store i32 1, i32* [[TMP34]], align 4
// CHECK7-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK7-NEXT: store i32 3, i32* [[TMP35]], align 4
// CHECK7-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK7-NEXT: store i8** [[TMP32]], i8*** [[TMP36]], align 4
// CHECK7-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK7-NEXT: store i8** [[TMP33]], i8*** [[TMP37]], align 4
// CHECK7-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK7-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP38]], align 4
// CHECK7-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK7-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP39]], align 4
// CHECK7-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK7-NEXT: store i8** null, i8*** [[TMP40]], align 4
// CHECK7-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK7-NEXT: store i8** null, i8*** [[TMP41]], align 4
// CHECK7-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK7-NEXT: store i64 0, i64* [[TMP42]], align 8
// CHECK7-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK7-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK7-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK7: omp_offload.failed:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108(i32 [[TMP12]], i32 [[TMP14]], i32 [[TMP16]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK7: omp_offload.cont:
// CHECK7-NEXT: [[TMP45:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP45]], i32* [[A_CASTED3]], align 4
// CHECK7-NEXT: [[TMP46:%.*]] = load i32, i32* [[A_CASTED3]], align 4
// CHECK7-NEXT: [[TMP47:%.*]] = load i16, i16* [[AA]], align 2
// CHECK7-NEXT: [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
// CHECK7-NEXT: store i16 [[TMP47]], i16* [[CONV5]], align 2
// CHECK7-NEXT: [[TMP48:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
// CHECK7-NEXT: [[TMP49:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP49]], 10
// CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK7: omp_if.then:
// CHECK7-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK7-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32*
// CHECK7-NEXT: store i32 [[TMP46]], i32* [[TMP51]], align 4
// CHECK7-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK7-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32*
// CHECK7-NEXT: store i32 [[TMP46]], i32* [[TMP53]], align 4
// CHECK7-NEXT: [[TMP54:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP54]], align 4
// CHECK7-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
// CHECK7-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32*
// CHECK7-NEXT: store i32 [[TMP48]], i32* [[TMP56]], align 4
// CHECK7-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
// CHECK7-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32*
// CHECK7-NEXT: store i32 [[TMP48]], i32* [[TMP58]], align 4
// CHECK7-NEXT: [[TMP59:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
// CHECK7-NEXT: store i8* null, i8** [[TMP59]], align 4
// CHECK7-NEXT: [[TMP60:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK7-NEXT: [[TMP61:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK7-NEXT: [[KERNEL_ARGS9:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK7-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 0
// CHECK7-NEXT: store i32 1, i32* [[TMP62]], align 4
// CHECK7-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 1
// CHECK7-NEXT: store i32 2, i32* [[TMP63]], align 4
// CHECK7-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 2
// CHECK7-NEXT: store i8** [[TMP60]], i8*** [[TMP64]], align 4
// CHECK7-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 3
// CHECK7-NEXT: store i8** [[TMP61]], i8*** [[TMP65]], align 4
// CHECK7-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 4
// CHECK7-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP66]], align 4
// CHECK7-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 5
// CHECK7-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP67]], align 4
// CHECK7-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 6
// CHECK7-NEXT: store i8** null, i8*** [[TMP68]], align 4
// CHECK7-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 7
// CHECK7-NEXT: store i8** null, i8*** [[TMP69]], align 4
// CHECK7-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]], i32 0, i32 8
// CHECK7-NEXT: store i64 0, i64* [[TMP70]], align 8
// CHECK7-NEXT: [[TMP71:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS9]])
// CHECK7-NEXT: [[TMP72:%.*]] = icmp ne i32 [[TMP71]], 0
// CHECK7-NEXT: br i1 [[TMP72]], label [[OMP_OFFLOAD_FAILED10:%.*]], label [[OMP_OFFLOAD_CONT11:%.*]]
// CHECK7: omp_offload.failed10:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP46]], i32 [[TMP48]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT11]]
// CHECK7: omp_offload.cont11:
// CHECK7-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK7: omp_if.else:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116(i32 [[TMP46]], i32 [[TMP48]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_IF_END]]
// CHECK7: omp_if.end:
// CHECK7-NEXT: [[TMP73:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP73]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK7-NEXT: [[TMP74:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: store i32 [[TMP74]], i32* [[A_CASTED12]], align 4
// CHECK7-NEXT: [[TMP75:%.*]] = load i32, i32* [[A_CASTED12]], align 4
// CHECK7-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK7-NEXT: store i32 [[TMP76]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK7-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK7-NEXT: [[TMP78:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK7-NEXT: [[CMP13:%.*]] = icmp sgt i32 [[TMP78]], 20
// CHECK7-NEXT: br i1 [[CMP13]], label [[OMP_IF_THEN14:%.*]], label [[OMP_IF_ELSE21:%.*]]
// CHECK7: omp_if.then14:
// CHECK7-NEXT: [[TMP79:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK7-NEXT: [[TMP80:%.*]] = sext i32 [[TMP79]] to i64
// CHECK7-NEXT: [[TMP81:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK7-NEXT: [[TMP82:%.*]] = mul nuw i32 [[TMP81]], 8
// CHECK7-NEXT: [[TMP83:%.*]] = sext i32 [[TMP82]] to i64
// CHECK7-NEXT: [[TMP84:%.*]] = bitcast [10 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK7-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP84]], i8* align 4 bitcast ([10 x i64]* @.offload_sizes.8 to i8*), i32 80, i1 false)
// CHECK7-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
// CHECK7-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32*
// CHECK7-NEXT: store i32 [[TMP75]], i32* [[TMP86]], align 4
// CHECK7-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
// CHECK7-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32*
// CHECK7-NEXT: store i32 [[TMP75]], i32* [[TMP88]], align 4
// CHECK7-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 0
// CHECK7-NEXT: store i8* null, i8** [[TMP89]], align 4
// CHECK7-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 1
// CHECK7-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [10 x float]**
// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP91]], align 4
// CHECK7-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 1
// CHECK7-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [10 x float]**
// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP93]], align 4
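//
// Illustrative aside (a plain comment, not a FileCheck directive): each
// mapped capture occupies one slot in the .offload_baseptrs/.offload_ptrs/
// .offload_mappers arrays, and variably sized captures patch their byte
// count into .offload_sizes at runtime after the constant sizes are
// memcpy'd in from @.offload_sizes.8. Conceptually, for the two VLAs
// mapped in this region (variable names hypothetical):
//
//   sizes[3] = (int64_t)n * sizeof(float);        // float vla[n]
//   sizes[7] = (int64_t)(5 * n) * sizeof(double); // double vla1[5][n]
//
// The slot numbers follow the checks around this comment (TMP80 into slot
// 3, TMP83 into slot 7).
//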
// CHECK7-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 1
// CHECK7-NEXT: store i8* null, i8** [[TMP94]], align 4
// CHECK7-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 2
// CHECK7-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32*
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP96]], align 4
// CHECK7-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 2
// CHECK7-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32*
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP98]], align 4
// CHECK7-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 2
// CHECK7-NEXT: store i8* null, i8** [[TMP99]], align 4
// CHECK7-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 3
// CHECK7-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to float**
// CHECK7-NEXT: store float* [[VLA]], float** [[TMP101]], align 4
// CHECK7-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 3
// CHECK7-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to float**
// CHECK7-NEXT: store float* [[VLA]], float** [[TMP103]], align 4
// CHECK7-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK7-NEXT: store i64 [[TMP80]], i64* [[TMP104]], align 4
// CHECK7-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 3
// CHECK7-NEXT: store i8* null, i8** [[TMP105]], align 4
// CHECK7-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 4
// CHECK7-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [5 x [10 x double]]**
// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP107]], align 4
// CHECK7-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 4
// CHECK7-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to [5 x [10 x double]]**
// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP109]], align 4
// CHECK7-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 4
// CHECK7-NEXT: store i8* null, i8** [[TMP110]], align 4
// CHECK7-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 5
// CHECK7-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32*
// CHECK7-NEXT: store i32 5, i32* [[TMP112]], align 4
// CHECK7-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 5
// CHECK7-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32*
// CHECK7-NEXT: store i32 5, i32* [[TMP114]], align 4
// CHECK7-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 5
// CHECK7-NEXT: store i8* null, i8** [[TMP115]], align 4
// CHECK7-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 6
// CHECK7-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32*
// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP117]], align 4
// CHECK7-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 6
// CHECK7-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32*
// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP119]], align 4
// CHECK7-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 6
// CHECK7-NEXT: store i8* null, i8** [[TMP120]], align 4
// CHECK7-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 7
// CHECK7-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to double**
// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP122]], align 4
// CHECK7-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 7
// CHECK7-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to double**
// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP124]], align 4
// CHECK7-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK7-NEXT: store i64 [[TMP83]], i64* [[TMP125]], align 4
// CHECK7-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 7
// CHECK7-NEXT: store i8* null, i8** [[TMP126]], align 4
// CHECK7-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 8
// CHECK7-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to %struct.TT**
// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP128]], align 4
// CHECK7-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 8
// CHECK7-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT**
// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 4
// CHECK7-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 8
// CHECK7-NEXT: store i8* null, i8** [[TMP131]], align 4
// CHECK7-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 9
// CHECK7-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32*
// CHECK7-NEXT: store i32 [[TMP77]], i32* [[TMP133]], align 4
// CHECK7-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 9
// CHECK7-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32*
// CHECK7-NEXT: store i32 [[TMP77]], i32* [[TMP135]], align 4
// CHECK7-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 9
// CHECK7-NEXT: store i8* null, i8** [[TMP136]], align 4
// CHECK7-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
// CHECK7-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
// CHECK7-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK7-NEXT: [[KERNEL_ARGS18:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK7-NEXT: [[TMP140:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 0
// CHECK7-NEXT: store i32 1, i32* [[TMP140]], align 4
// CHECK7-NEXT: [[TMP141:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 1
// CHECK7-NEXT: store i32 10, i32* [[TMP141]], align 4
// CHECK7-NEXT: [[TMP142:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 2
// CHECK7-NEXT: store i8** [[TMP137]], i8*** [[TMP142]], align 4
// CHECK7-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 3
// CHECK7-NEXT: store i8** [[TMP138]], i8*** [[TMP143]], align 4
// CHECK7-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 4
// CHECK7-NEXT: store i64* [[TMP139]], i64** [[TMP144]], align 4
// CHECK7-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 5
// CHECK7-NEXT: store i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP145]], align 4
// CHECK7-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 6
// CHECK7-NEXT: store i8** null, i8*** [[TMP146]], align 4
// CHECK7-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 7
// CHECK7-NEXT: store i8** null, i8*** [[TMP147]], align 4
// CHECK7-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 8
// CHECK7-NEXT: store i64 0, i64* [[TMP148]], align 8
// CHECK7-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]])
// CHECK7-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0
// CHECK7-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
// CHECK7: omp_offload.failed19:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP75]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP77]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT20]]
// CHECK7: omp_offload.cont20:
// CHECK7-NEXT: br label [[OMP_IF_END22:%.*]]
// CHECK7: omp_if.else21:
// CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP75]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP77]]) #[[ATTR4]]
// CHECK7-NEXT: br label [[OMP_IF_END22]]
// CHECK7: omp_if.end22:
// CHECK7-NEXT: [[TMP151:%.*]] = load i32, i32* [[A]], align 4
// CHECK7-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP152]])
// CHECK7-NEXT: ret i32 [[TMP151]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96
// CHECK7-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK7-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void 
@__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK7-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: // CHECK7-NEXT: store i32 33, i32* [[I]], align 4 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK7: .omp.final.done: // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@.omp_task_entry. // CHECK7-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4 // CHECK7-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4 // CHECK7-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4 // CHECK7-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4 // CHECK7-NEXT: [[KERNEL_ARGS_I:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK7-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 // CHECK7-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 // CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 // CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 // CHECK7-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 // CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* // CHECK7-NEXT: [[TMP9:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8* // CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]]) // CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) // CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) // CHECK7-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]]) // CHECK7-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !26 // CHECK7-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !26 // CHECK7-NEXT: store i8* null, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !26 // CHECK7-NEXT: store void (i8*, ...)* null, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !26 // CHECK7-NEXT: store i8* [[TMP9]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !26 // CHECK7-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP10:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 0 // CHECK7-NEXT: store i32 1, 
i32* [[TMP11]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 1 // CHECK7-NEXT: store i32 0, i32* [[TMP12]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 2 // CHECK7-NEXT: store i8** null, i8*** [[TMP13]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 3 // CHECK7-NEXT: store i8** null, i8*** [[TMP14]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 4 // CHECK7-NEXT: store i64* null, i64** [[TMP15]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 5 // CHECK7-NEXT: store i64* null, i64** [[TMP16]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 6 // CHECK7-NEXT: store i8** null, i8*** [[TMP17]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 7 // CHECK7-NEXT: store i8** null, i8*** [[TMP18]], align 4, !noalias !26 // CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i32 8 // CHECK7-NEXT: store i64 0, i64* [[TMP19]], align 8, !noalias !26 // CHECK7-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel_nowait(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS_I]], i32 0, i8* null, i32 0, i8* null) // CHECK7-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK7-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]] // CHECK7: omp_offload.failed.i: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96() #[[ATTR4]] // CHECK7-NEXT: br label [[DOTOMP_OUTLINED__1_EXIT]] // CHECK7: .omp_outlined..1.exit: // CHECK7-NEXT: ret i32 0 // // // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 // CHECK7-SAME: (i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]]) // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i64* noundef nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[K1:%.*]] = alloca i64, align 8 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8 // CHECK7-NEXT: store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 // CHECK7-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK7-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1073741859, i32 0, i32 8, i32 1, i32 1) // CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK7: omp.dispatch.cond: // CHECK7-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]]) // CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0 // CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK7: omp.dispatch.body: // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27:![0-9]+]] // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: [[MUL:%.*]] = 
mul nsw i32 [[TMP8]], 1 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] // CHECK7-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3 // CHECK7-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]] // CHECK7-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK7-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: // CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 // CHECK7-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK7: omp.dispatch.inc: // CHECK7-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK7: omp.dispatch.end: // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK7-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: // CHECK7-NEXT: store i32 1, i32* [[I]], align 4 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK7: .omp.final.done: // CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0 // CHECK7-NEXT: br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK7: .omp.linear.pu: // CHECK7-NEXT: [[TMP17:%.*]] = load i64, i64* [[K1]], align 8 // CHECK7-NEXT: store i64 [[TMP17]], i64* [[TMP0]], align 8 // CHECK7-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK7: .omp.linear.pu.done: // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 // CHECK7-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR2]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK7-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK7-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK7-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* 
[[LIN_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK7-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[LIN2:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A3:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK7-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4 // CHECK7-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK7-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 // CHECK7-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 // CHECK7-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) // CHECK7-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK7-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK7-NEXT: br i1 
[[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: // CHECK7-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK7-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: // CHECK7-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30:![0-9]+]] // CHECK7-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK7-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: // CHECK7-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK7-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK7-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64 // CHECK7-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK7-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]] // CHECK7-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32 // CHECK7-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64 // CHECK7-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK7-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]] // CHECK7-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 // CHECK7-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32 // CHECK7-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 // CHECK7-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16 // CHECK7-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: // CHECK7-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1 // CHECK7-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] // CHECK7: omp.inner.for.end: // 
CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) // CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK7-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: // CHECK7-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK7: .omp.final.done: // CHECK7-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK7-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK7: .omp.linear.pu: // CHECK7-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4 // CHECK7-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4 // CHECK7-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4 // CHECK7-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK7: .omp.linear.pu.done: // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 // CHECK7-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR2]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK7-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[TMP:%.*]] = alloca i16, align 2 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[IT:%.*]] = alloca i16, align 2 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33:![0-9]+]] // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK7-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16 // CHECK7-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, 
!llvm.access.group [[ACC_GRP33]] // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK7-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK7-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK7-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32 // CHECK7-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 // CHECK7-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 // CHECK7-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK7-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK7-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: // CHECK7-NEXT: store i16 22, i16* [[IT]], align 2 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK7: .omp.final.done: // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 // CHECK7-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 // CHECK7-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 // CHECK7-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 // CHECK7-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 // CHECK7-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 // CHECK7-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK7-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 // CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK7-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK7-NEXT: store i32 [[VLA3]], 
i32* [[VLA_ADDR4]], align 4 // CHECK7-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 // CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 // CHECK7-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 // CHECK7-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 // CHECK7-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 // CHECK7-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 // CHECK7-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK7-NEXT: 
[[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[IT:%.*]] = alloca i8, align 1 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 // CHECK7-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK7-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 // CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK7-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK7-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 // CHECK7-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 // CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 // CHECK7-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK7-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 // CHECK7-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK7-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK7: omp.dispatch.cond: // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: // CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK7-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 // CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 // CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK7-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK7: omp.dispatch.body: // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: // CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], 
align 4, !llvm.access.group [[ACC_GRP36:![0-9]+]] // CHECK7-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK7-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: // CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK7-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK7-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK7-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK7-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2 // CHECK7-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK7-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK7-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK7-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 // CHECK7-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK7-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK7-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK7-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 // CHECK7-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 // CHECK7-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK7-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK7-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] // CHECK7-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 // CHECK7-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK7-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 // CHECK7-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK7-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 // CHECK7-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, 
!llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK7-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK7-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 // CHECK7-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: // CHECK7-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK7-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP36]] // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK7: omp.dispatch.inc: // CHECK7-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK7-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 // CHECK7-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK7-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK7-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 // CHECK7-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK7: omp.dispatch.end: // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) // CHECK7-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK7-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: // CHECK7-NEXT: store i8 96, i8* [[IT]], align 1 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK7: .omp.final.done: // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@_Z3bari // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, i32* [[A]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK7-NEXT: store i32 [[ADD]], i32* [[A]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK7-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK7-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK7-NEXT: 
[[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK7-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 // CHECK7-NEXT: ret i32 [[TMP8]] // // // CHECK7-LABEL: define {{[^@]+}}@_ZN2S12r1Ei // CHECK7-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[B:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 // CHECK7-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 // CHECK7-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4 // CHECK7-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK7-NEXT: store i32 [[ADD]], i32* [[B]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() // CHECK7-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK7-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[B_CASTED]], align 4 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8* // CHECK7-NEXT: [[FROMBOOL2:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL2]], i8* [[CONV]], align 1 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK7-NEXT: [[TMP9:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK7-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK7-NEXT: [[TMP10:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK7-NEXT: [[TMP11:%.*]] = mul nuw i32 [[TMP10]], 2 // CHECK7-NEXT: [[TMP12:%.*]] = sext i32 [[TMP11]] to i64 // CHECK7-NEXT: [[TMP13:%.*]] = bitcast [6 x i64]* [[DOTOFFLOAD_SIZES]] to i8* // 
CHECK7-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP13]], i8* align 4 bitcast ([6 x i64]* @.offload_sizes.11 to i8*), i32 48, i1 false) // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to %struct.S1** // CHECK7-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP15]], align 4 // CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to double** // CHECK7-NEXT: store double* [[A]], double** [[TMP17]], align 4 // CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 // CHECK7-NEXT: store i8* null, i8** [[TMP18]], align 4 // CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 // CHECK7-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* // CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP20]], align 4 // CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 // CHECK7-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* // CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP22]], align 4 // CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 // CHECK7-NEXT: store i8* null, i8** [[TMP23]], align 4 // CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 // CHECK7-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* // CHECK7-NEXT: store i32 2, i32* [[TMP25]], align 4 // CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK7-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* // CHECK7-NEXT: store i32 2, i32* [[TMP27]], align 4 // CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 // CHECK7-NEXT: store i8* null, i8** [[TMP28]], align 4 // CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 // CHECK7-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 // CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 // CHECK7-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 // CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 // CHECK7-NEXT: store i8* null, i8** [[TMP33]], align 4 // CHECK7-NEXT: [[TMP34:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 // CHECK7-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** // CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 // CHECK7-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 // CHECK7-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** // CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 // CHECK7-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 // CHECK7-NEXT: store i64 [[TMP12]], i64* [[TMP38]], align 4 // CHECK7-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 // CHECK7-NEXT: store i8* null, i8** [[TMP39]], 
align 4 // CHECK7-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 // CHECK7-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* // CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP41]], align 4 // CHECK7-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 // CHECK7-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32* // CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP43]], align 4 // CHECK7-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 // CHECK7-NEXT: store i8* null, i8** [[TMP44]], align 4 // CHECK7-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP46:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 // CHECK7-NEXT: [[TMP48:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK7-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP48]] to i1 // CHECK7-NEXT: [[TMP49:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK7-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 // CHECK7-NEXT: store i32 1, i32* [[TMP50]], align 4 // CHECK7-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 // CHECK7-NEXT: store i32 6, i32* [[TMP51]], align 4 // CHECK7-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 // CHECK7-NEXT: store i8** [[TMP45]], i8*** [[TMP52]], align 4 // CHECK7-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 // CHECK7-NEXT: store i8** [[TMP46]], i8*** [[TMP53]], align 4 // CHECK7-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 // CHECK7-NEXT: store i64* [[TMP47]], i64** [[TMP54]], align 4 // CHECK7-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 // CHECK7-NEXT: store i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP55]], align 4 // CHECK7-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 // CHECK7-NEXT: store i8** null, i8*** [[TMP56]], align 4 // CHECK7-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 // CHECK7-NEXT: store i8** null, i8*** [[TMP57]], align 4 // CHECK7-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 // CHECK7-NEXT: store i64 0, i64* [[TMP58]], align 8 // CHECK7-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 [[TMP49]], i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) // CHECK7-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // 
CHECK7-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK7: omp_offload.cont: // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: [[TMP61:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP61]] // CHECK7-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 // CHECK7-NEXT: [[TMP62:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 // CHECK7-NEXT: [[CONV6:%.*]] = sext i16 [[TMP62]] to i32 // CHECK7-NEXT: [[TMP63:%.*]] = load i32, i32* [[B]], align 4 // CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP63]] // CHECK7-NEXT: [[TMP64:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 // CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP64]]) // CHECK7-NEXT: ret i32 [[ADD7]] // // // CHECK7-LABEL: define {{[^@]+}}@_ZL7fstatici // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK7-NEXT: [[AAA:%.*]] = alloca i8, align 1 // CHECK7-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, i32* [[A]], align 4 // CHECK7-NEXT: store i16 0, i16* [[AA]], align 2 // CHECK7-NEXT: store i8 0, i8* [[AAA]], align 1 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK7-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK7-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 // CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 
x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4 // CHECK7-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 // CHECK7-NEXT: store i8* null, i8** [[TMP11]], align 4 // CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 // CHECK7-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* // CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 // CHECK7-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* // CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4 // CHECK7-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 // CHECK7-NEXT: store i8* null, i8** [[TMP16]], align 4 // CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 // CHECK7-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* // CHECK7-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 // CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK7-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* // CHECK7-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4 // CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 // CHECK7-NEXT: store i8* null, i8** [[TMP21]], align 4 // CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 // CHECK7-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]** // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4 // CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 // CHECK7-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]** // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4 // CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 // CHECK7-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 // CHECK7-NEXT: store i32 1, i32* [[TMP29]], align 4 // CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 // CHECK7-NEXT: store i32 4, i32* [[TMP30]], align 4 // CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 // CHECK7-NEXT: store i8** [[TMP27]], i8*** [[TMP31]], align 4 // CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 // CHECK7-NEXT: store i8** [[TMP28]], i8*** [[TMP32]], align 4 // CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], 
%struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 // CHECK7-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP33]], align 4 // CHECK7-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 // CHECK7-NEXT: store i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP34]], align 4 // CHECK7-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 // CHECK7-NEXT: store i8** null, i8*** [[TMP35]], align 4 // CHECK7-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 // CHECK7-NEXT: store i8** null, i8*** [[TMP36]], align 4 // CHECK7-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 // CHECK7-NEXT: store i64 0, i64* [[TMP37]], align 8 // CHECK7-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) // CHECK7-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0 // CHECK7-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK7: omp_offload.cont: // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: [[TMP40:%.*]] = load i32, i32* [[A]], align 4 // CHECK7-NEXT: ret i32 [[TMP40]] // // // CHECK7-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i // CHECK7-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK7-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, i32* [[A]], align 4 // CHECK7-NEXT: store i16 0, i16* [[AA]], align 2 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK7-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 // 
CHECK7-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32* // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 // CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 // CHECK7-NEXT: store i8* null, i8** [[TMP9]], align 4 // CHECK7-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 // CHECK7-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* // CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4 // CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 // CHECK7-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* // CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 // CHECK7-NEXT: store i8* null, i8** [[TMP14]], align 4 // CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 // CHECK7-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]** // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4 // CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK7-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]** // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4 // CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 // CHECK7-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 // CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 // CHECK7-NEXT: store i32 1, i32* [[TMP22]], align 4 // CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 // CHECK7-NEXT: store i32 3, i32* [[TMP23]], align 4 // CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 // CHECK7-NEXT: store i8** [[TMP20]], i8*** [[TMP24]], align 4 // CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 // CHECK7-NEXT: store i8** [[TMP21]], i8*** [[TMP25]], align 4 // CHECK7-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 // CHECK7-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64** [[TMP26]], align 4 // CHECK7-NEXT: [[TMP27:%.*]] = getelementptr 
inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 // CHECK7-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i64** [[TMP27]], align 4 // CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 // CHECK7-NEXT: store i8** null, i8*** [[TMP28]], align 4 // CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 // CHECK7-NEXT: store i8** null, i8*** [[TMP29]], align 4 // CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 // CHECK7-NEXT: store i64 0, i64* [[TMP30]], align 8 // CHECK7-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) // CHECK7-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK7-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK7: omp_offload.cont: // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: [[TMP33:%.*]] = load i32, i32* [[A]], align 4 // CHECK7-NEXT: ret i32 [[TMP33]] // // // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214 // CHECK7-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) // CHECK7-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 // CHECK7-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK7-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK7-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load 
i32, i32* [[VLA_ADDR2]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8* // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[B_CASTED]], align 4 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 // CHECK7-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV]], align 1 // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK7-NEXT: [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8* // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], i8* [[CONV3]], align 1 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK7-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1 // CHECK7-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK7-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 // CHECK7-NEXT: call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR4]] // CHECK7-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 // CHECK7-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK7-NEXT: store %struct.S1* 
[[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 // CHECK7-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK7-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK7-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 // CHECK7-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8* // CHECK7-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 1 // CHECK7-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: // CHECK7-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 // CHECK7-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK7-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: // CHECK7-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK7-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: // CHECK7-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39:![0-9]+]] // CHECK7-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] // CHECK7-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: // CHECK7-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 // CHECK7-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK7-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double // CHECK7-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK7-NEXT: store double [[ADD]], double* [[A]], align 4, !nontemporal !40, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: [[A5:%.*]] = getelementptr 
inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK7-NEXT: [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !40, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 // CHECK7-NEXT: store double [[INC]], double* [[A5]], align 4, !nontemporal !40, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK7-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]] // CHECK7-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 // CHECK7-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: // CHECK7-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: [[ADD8:%.*]] = add i64 [[TMP16]], 1 // CHECK7-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP39]] // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP41:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4 // CHECK7-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK7-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3 // CHECK7-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]] // CHECK7: cond.true10: // CHECK7-NEXT: br label [[COND_END12:%.*]] // CHECK7: cond.false11: // CHECK7-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: br label [[COND_END12]] // CHECK7: cond.end12: // CHECK7-NEXT: [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ] // CHECK7-NEXT: store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND14:%.*]] // CHECK7: omp.inner.for.cond14: // CHECK7-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK7-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK7-NEXT: br i1 [[CMP15]], label [[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]] // CHECK7: omp.inner.for.body16: // CHECK7-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK7-NEXT: [[MUL17:%.*]] = mul i64 [[TMP24]], 400 // CHECK7-NEXT: [[SUB18:%.*]] = sub i64 2000, [[MUL17]] // CHECK7-NEXT: store i64 [[SUB18]], i64* [[IT]], align 8 // CHECK7-NEXT: [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK7-NEXT: [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double // CHECK7-NEXT: [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00 // CHECK7-NEXT: [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK7-NEXT: store double [[ADD20]], double* [[A21]], align 4 // CHECK7-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], 
%struct.S1* [[TMP0]], i32 0, i32 0 // CHECK7-NEXT: [[TMP26:%.*]] = load double, double* [[A22]], align 4 // CHECK7-NEXT: [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00 // CHECK7-NEXT: store double [[INC23]], double* [[A22]], align 4 // CHECK7-NEXT: [[CONV24:%.*]] = fptosi double [[INC23]] to i16 // CHECK7-NEXT: [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK7-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]] // CHECK7-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1 // CHECK7-NEXT: store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] // CHECK7: omp.body.continue27: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] // CHECK7: omp.inner.for.inc28: // CHECK7-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK7-NEXT: [[ADD29:%.*]] = add i64 [[TMP28]], 1 // CHECK7-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP43:![0-9]+]] // CHECK7: omp.inner.for.end30: // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) // CHECK7-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK7-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: // CHECK7-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK7: .omp.final.done: // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 // CHECK7-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK7-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK7-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK7-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1 // CHECK7-NEXT: 
[[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK7-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 // CHECK7-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK7-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK7-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK7-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK7-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK7-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[I:%.*]] = alloca i64, align 8 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK7-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK7-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK7-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK7-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK7-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK7-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 // CHECK7-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK7-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK7-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK7: cond.true: // CHECK7-NEXT: br label [[COND_END:%.*]] // CHECK7: cond.false: // CHECK7-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: br label [[COND_END]] // CHECK7: cond.end: // CHECK7-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK7-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK7-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK7-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK7: omp.inner.for.cond: // CHECK7-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45:![0-9]+]] // CHECK7-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK7-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK7: omp.inner.for.body: // 
CHECK7-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK7-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK7-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 // CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK7-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 // CHECK7-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK7-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK7: omp.body.continue: // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK7: omp.inner.for.inc: // CHECK7-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK7-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP45]] // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] // CHECK7: omp.inner.for.end: // CHECK7-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK7: omp.loop.exit: // CHECK7-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK7-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK7-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK7: .omp.final.then: // CHECK7-NEXT: store i64 11, i64* [[I]], align 8 // CHECK7-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK7: .omp.final.done: // CHECK7-NEXT: ret void // // // CHECK7-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg // CHECK7-SAME: () #[[ATTR8:[0-9]+]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: call void @__tgt_register_requires(i64 1) // CHECK7-NEXT: ret void // // // CHECK9-LABEL: define {{[^@]+}}@_Z7get_valv // CHECK9-SAME: () #[[ATTR0:[0-9]+]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: ret i64 0 // // // CHECK9-LABEL: define {{[^@]+}}@_Z3fooi // CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK9-NEXT: [[B:%.*]] = alloca [10 x float], align 4 // CHECK9-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 // CHECK9-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 // CHECK9-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK9-NEXT: 
[[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[K:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[I7:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[K8:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[LIN:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP20:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[LIN27:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[A28:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 // CHECK9-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[IT53:%.*]] = alloca i16, align 2 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 // CHECK9-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[IT72:%.*]] = alloca i8, align 1 // CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK9-NEXT: store i32 0, i32* [[A]], align 4 // CHECK9-NEXT: store i16 0, i16* [[AA]], align 2 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 // CHECK9-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() // CHECK9-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 // CHECK9-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK9-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]] // CHECK9-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8 // CHECK9-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK9: omp.inner.for.cond: // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]] // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]] // CHECK9-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK9: omp.inner.for.body: // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]] // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // 
CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK9-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: store i32 33, i32* [[I]], align 4
// CHECK9-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK9-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK9-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK9-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
// CHECK9-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8
// CHECK9-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK9: omp.inner.for.cond9:
// CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK9-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK9: omp.inner.for.body11:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK9-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK9-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK9-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK9-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK9: omp.body.continue16:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK9: omp.inner.for.inc17:
// CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK9-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK9: omp.inner.for.end19:
// CHECK9-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK9-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK9-NEXT: store i64 [[TMP20]], i64* [[K]], align 8
// CHECK9-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK9-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK9-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK9-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK9-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK9-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK9-NEXT: [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK9-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK9: omp.inner.for.cond29:
// CHECK9-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9:![0-9]+]]
// CHECK9-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK9-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK9: omp.inner.for.body31:
// CHECK9-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK9-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK9-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK9-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK9-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK9-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK9-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK9-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK9-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK9-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK9-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK9-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK9-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK9-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK9: omp.body.continue45:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK9: omp.inner.for.inc46:
// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK9-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK9: omp.inner.for.end48:
// CHECK9-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK9-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK9-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK9-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK9-NEXT: store i32 [[TMP36]], i32* [[A]], align 4
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK9-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK9-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK9-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK9: omp.inner.for.cond54:
// CHECK9-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK9-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK9: omp.inner.for.body56:
// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK9-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK9-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK9-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK9-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK9-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK9-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK9-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK9: omp.body.continue64:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK9: omp.inner.for.inc65:
// CHECK9-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK9-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK9: omp.inner.for.end67:
// CHECK9-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK9-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK9-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK9: omp.inner.for.cond73:
// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
// CHECK9-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK9-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK9: omp.inner.for.body75:
// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK9-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK9-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK9-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK9-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK9-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK9-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK9-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK9-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK9-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK9-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK9-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK9-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1
// CHECK9-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2
// CHECK9-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK9-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]]
// CHECK9-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]]
// CHECK9-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3
// CHECK9-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00
// CHECK9-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK9-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1
// CHECK9-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK9-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32
// CHECK9-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK9-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK9-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK9: omp.body.continue97:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK9: omp.inner.for.inc98:
// CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1
// CHECK9-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK9: omp.inner.for.end100:
// CHECK9-NEXT: store i8 96, i8* [[IT72]], align 1
// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP59]])
// CHECK9-NEXT: ret i32 [[TMP58]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z3bari
// CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK9-NEXT: store i32 0, i32* [[A]], align 4
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]])
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK9-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]])
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK9-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]])
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK9-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]])
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK9-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: ret i32 [[TMP8]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK9-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK9-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK9-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK9-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK9-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK9-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK9-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK9-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK9-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]]
// CHECK9-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: [[MUL:%.*]] = mul i64 [[TMP8]], 400
// CHECK9-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK9-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP9]] to double
// CHECK9-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK9-NEXT: store double [[ADD2]], double* [[A]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK9-NEXT: [[TMP10:%.*]] = load double, double* [[A3]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP10]], 1.000000e+00
// CHECK9-NEXT: store double [[INC]], double* [[A3]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK9-NEXT: [[TMP11:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP11]]
// CHECK9-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK9-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: [[ADD6:%.*]] = add i64 [[TMP12]], 1
// CHECK9-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK9-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK9-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]]
// CHECK9-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i64 1
// CHECK9-NEXT: [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
// CHECK9-NEXT: [[CONV9:%.*]] = sext i16 [[TMP14]] to i32
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[B]], align 4
// CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP15]]
// CHECK9-NEXT: [[TMP16:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP16]])
// CHECK9-NEXT: ret i32 [[ADD10]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK9-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK9-NEXT: store i32 0, i32* [[A]], align 4
// CHECK9-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK9-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: ret i32 [[TMP0]]
//
//
// CHECK9-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK9-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK9-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK9-NEXT: store i32 0, i32* [[A]], align 4
// CHECK9-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK9-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP21:![0-9]+]]
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK9-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK9-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK9-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK9-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK9-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK9-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK9-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP21]]
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: store i64 11, i64* [[I]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK9-NEXT: ret i32 [[TMP8]]
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: ret i64 0
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK11-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK11-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK11-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK11-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[K8:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[_TMP20:%.*]] = alloca i64, align 4
// CHECK11-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[LIN27:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[A28:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[_TMP49:%.*]] = alloca i16, align 2
// CHECK11-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[IT53:%.*]] = alloca i16, align 2
// CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[_TMP68:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[IT72:%.*]] = alloca i8, align 1
// CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK11-NEXT: store i32 0, i32* [[A]], align 4
// CHECK11-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave()
// CHECK11-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4
// CHECK11-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4
// CHECK11-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]]
// CHECK11-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8
// CHECK11-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]]
// CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK11-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: store i32 33, i32* [[I]], align 4
// CHECK11-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK11-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK11-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK11-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4
// CHECK11-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8
// CHECK11-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK11: omp.inner.for.cond9:
// CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]]
// CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]]
// CHECK11-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK11: omp.inner.for.body11:
// CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1
// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK11-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3
// CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK11-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
// CHECK11-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1
// CHECK11-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK11: omp.body.continue16:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK11: omp.inner.for.inc17:
// CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK11-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK11: omp.inner.for.end19:
// CHECK11-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK11-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8
// CHECK11-NEXT: store i64 [[TMP18]], i64* [[K]], align 8
// CHECK11-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK11-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK11-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK11-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8
// CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK11-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4
// CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4
// CHECK11-NEXT: [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK11-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK11: omp.inner.for.cond29:
// CHECK11-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]]
// CHECK11-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]]
// CHECK11-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK11: omp.inner.for.body31:
// CHECK11-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400
// CHECK11-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK11-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64
// CHECK11-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]]
// CHECK11-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK11-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK11-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64
// CHECK11-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]]
// CHECK11-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK11-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK11-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[CONV42:%.*]] = sext i16 [[TMP31]] to i32
// CHECK11-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK11-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK11-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK11: omp.body.continue45:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK11: omp.inner.for.inc46:
// CHECK11-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1
// CHECK11-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]]
// CHECK11: omp.inner.for.end48:
// CHECK11-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK11-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4
// CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4
// CHECK11-NEXT: store i32 [[TMP34]], i32* [[A]], align 4
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK11-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK11-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK11-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK11: omp.inner.for.cond54:
// CHECK11-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]]
// CHECK11-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]]
// CHECK11-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK11: omp.inner.for.body56:
// CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4
// CHECK11-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK11-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK11-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1
// CHECK11-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32
// CHECK11-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK11-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK11-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK11: omp.body.continue64:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK11: omp.inner.for.inc65:
// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK11-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK11: omp.inner.for.end67:
// CHECK11-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK11-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK11-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK11-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK11-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK11: omp.inner.for.cond73:
// CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]]
// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]]
// CHECK11-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK11: omp.inner.for.body75:
// CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1
// CHECK11-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK11-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK11-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1
// CHECK11-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2
// CHECK11-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double
// CHECK11-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK11-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK11-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3
// CHECK11-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double
// CHECK11-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00
// CHECK11-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float
// CHECK11-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1
// CHECK11-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2
// CHECK11-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00
// CHECK11-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK11-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]]
// CHECK11-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3
// CHECK11-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00
// CHECK11-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0
// CHECK11-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1
// CHECK11-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1
// CHECK11-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32
// CHECK11-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1
// CHECK11-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8
// CHECK11-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]]
// CHECK11: omp.body.continue97:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]]
// CHECK11: omp.inner.for.inc98:
// CHECK11-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1
// CHECK11-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK11: omp.inner.for.end100:
// CHECK11-NEXT: store i8 96, i8* [[IT72]], align 1
// CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP57]])
// CHECK11-NEXT: ret i32 [[TMP56]]
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z3bari
// CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK11-NEXT: store i32 0, i32* [[A]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]])
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK11-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK11-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]])
// CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK11-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK11-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]])
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK11-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK11-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]])
// CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK11-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: ret i32 [[TMP8]]
//
//
// CHECK11-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK11-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK11-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK11-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK11-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK11-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK11-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK11-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK11-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK11-NEXT: store i64 [[TMP4]], i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19:![0-9]+]]
// CHECK11-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: [[CMP:%.*]] = icmp ule i64 [[TMP5]], [[TMP6]]
// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: [[MUL:%.*]] = mul i64 [[TMP7]], 400
// CHECK11-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK11-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP8]] to double
// CHECK11-NEXT: [[ADD2:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK11-NEXT: store double [[ADD2]], double* [[A]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK11-NEXT: [[TMP9:%.*]] = load double, double* [[A3]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP9]], 1.000000e+00
// CHECK11-NEXT: store double [[INC]], double* [[A3]], align 4, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: [[CONV4:%.*]] = fptosi double [[INC]] to i16
// CHECK11-NEXT: [[TMP10:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP10]]
// CHECK11-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK11-NEXT: store i16 [[CONV4]], i16* [[ARRAYIDX5]], align 2, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: [[ADD6:%.*]] = add i64 [[TMP11]], 1
// CHECK11-NEXT: store i64 [[ADD6]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK11-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK11-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]]
// CHECK11-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX7]], i32 1
// CHECK11-NEXT: [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
// CHECK11-NEXT: [[CONV9:%.*]] = sext i16 [[TMP13]] to i32
// CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[B]], align 4
// CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 [[CONV9]], [[TMP14]]
// CHECK11-NEXT: [[TMP15:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP15]])
// CHECK11-NEXT: ret i32 [[ADD10]]
//
//
// CHECK11-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK11-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK11-NEXT: store i32 0, i32* [[A]], align 4
// CHECK11-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK11-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: ret i32 [[TMP0]]
//
//
// CHECK11-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK11-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK11-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK11-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK11-NEXT: store i32 0, i32* [[A]], align 4
// CHECK11-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK11-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK11-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22:![0-9]+]]
// CHECK11-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]]
// CHECK11-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK11-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1
// CHECK11-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32
// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1
// CHECK11-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16
// CHECK11-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2
// CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK11-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1
// CHECK11-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP22]]
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: store i64 11, i64* [[I]], align 8
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK11-NEXT: ret i32 [[TMP8]]
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK13-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: ret i64 0
//
//
// CHECK13-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK13-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK13-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[I7:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[K8:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[_TMP20:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK13-NEXT: [[LIN27:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[A28:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[_TMP49:%.*]] = alloca i16, align 2
// CHECK13-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[IT53:%.*]] = alloca i16, align 2
// CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[_TMP68:%.*]] = alloca i8, align 1
// CHECK13-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4
// CHECK13-NEXT: [[IT72:%.*]] = alloca i8, align 1
// CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK13-NEXT: store i32 0, i32* [[A]], align 4
// CHECK13-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
// CHECK13-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK13-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8
// CHECK13-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP1]], align 4
// CHECK13-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8
// CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK13-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK13-NEXT: [[TMP5:%.*]] = mul nuw i64 5, [[TMP4]]
// CHECK13-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK13-NEXT: store i64 [[TMP4]], i64* [[__VLA_EXPR1]], align 8
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK13-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK13: omp.inner.for.cond:
// CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
// CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK13: omp.inner.for.body:
// CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 5
// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK13: omp.body.continue:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK13-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK13: omp.inner.for.end:
// CHECK13-NEXT: store i32 33, i32* [[I]], align 4
// CHECK13-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK13-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4
// CHECK13-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4
// CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4
// CHECK13-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV6]], align 4
// CHECK13-NEXT: [[TMP12:%.*]] = load i64, i64* [[K]], align 8
// CHECK13-NEXT: store i64 [[TMP12]], i64* [[DOTLINEAR_START]], align 8
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]]
// CHECK13: omp.inner.for.cond9:
// CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK13-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]]
// CHECK13: omp.inner.for.body11:
// CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]]
// CHECK13-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP17]], 3
// CHECK13-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64
// CHECK13-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP16]], [[CONV]]
// CHECK13-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK13-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]]
// CHECK13: omp.body.continue16:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]]
// CHECK13: omp.inner.for.inc17:
// CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK13-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP6]]
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK13: omp.inner.for.end19:
// CHECK13-NEXT: store i32 1, i32* [[I7]], align 4
// CHECK13-NEXT: [[TMP20:%.*]] = load i64, i64* [[K8]], align 8
// CHECK13-NEXT: store i64 [[TMP20]], i64* [[K]], align 8
// CHECK13-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK13-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8
// CHECK13-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8
// CHECK13-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8
// CHECK13-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV23]], align 8
// CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK13-NEXT: store i32 [[TMP22]], i32* [[DOTLINEAR_START24]], align 4
// CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: store i32 [[TMP23]], i32* [[DOTLINEAR_START25]], align 4
// CHECK13-NEXT: [[CALL26:%.*]] = call noundef i64 @_Z7get_valv()
// CHECK13-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]]
// CHECK13: omp.inner.for.cond29:
// CHECK13-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9:![0-9]+]]
// CHECK13-NEXT: [[TMP25:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP24]], [[TMP25]]
// CHECK13-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]]
// CHECK13: omp.inner.for.body31:
// CHECK13-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[MUL32:%.*]] = mul i64 [[TMP26]], 400
// CHECK13-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]]
// CHECK13-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[CONV34:%.*]] = sext i32 [[TMP27]] to i64
// CHECK13-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[MUL35:%.*]] = mul i64 [[TMP28]], [[TMP29]]
// CHECK13-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]]
// CHECK13-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32
// CHECK13-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[CONV38:%.*]] = sext i32 [[TMP30]] to i64
// CHECK13-NEXT: [[TMP31:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[MUL39:%.*]] = mul i64 [[TMP31]], [[TMP32]]
// CHECK13-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]]
// CHECK13-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32
// CHECK13-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[CONV42:%.*]] = sext i16 [[TMP33]] to i32
// CHECK13-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1
// CHECK13-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16
// CHECK13-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]]
// CHECK13: omp.body.continue45:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]]
// CHECK13: omp.inner.for.inc46:
// CHECK13-NEXT: [[TMP34:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: [[ADD47:%.*]] = add i64 [[TMP34]], 1
// CHECK13-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP9]]
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK13: omp.inner.for.end48:
// CHECK13-NEXT: store i64 400, i64* [[IT]], align 8
// CHECK13-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN27]], align 4
// CHECK13-NEXT: store i32 [[TMP35]], i32* [[LIN]], align 4
// CHECK13-NEXT: [[TMP36:%.*]] = load i32, i32* [[A28]], align 4
// CHECK13-NEXT: store i32 [[TMP36]], i32* [[A]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4
// CHECK13-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4
// CHECK13-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4
// CHECK13-NEXT: store i32 [[TMP37]], i32* [[DOTOMP_IV52]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]]
// CHECK13: omp.inner.for.cond54:
// CHECK13-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]]
// CHECK13-NEXT: [[TMP39:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK13-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP38]], [[TMP39]]
// CHECK13-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]]
// CHECK13: omp.inner.for.body56:
// CHECK13-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK13-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP40]], 4
// CHECK13-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]]
// CHECK13-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16
// CHECK13-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group [[ACC_GRP12]]
// CHECK13-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK13-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP41]], 1
// CHECK13-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK13-NEXT: [[TMP42:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP12]]
// CHECK13-NEXT: [[CONV61:%.*]] = sext i16 [[TMP42]] to i32
// CHECK13-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1
// CHECK13-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16
// CHECK13-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP12]]
// CHECK13-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]]
// CHECK13: omp.body.continue64:
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]]
// CHECK13: omp.inner.for.inc65:
// CHECK13-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK13-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP43]], 1
// CHECK13-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP12]]
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK13: omp.inner.for.end67:
// CHECK13-NEXT: store i16 22, i16* [[IT53]], align 2
// CHECK13-NEXT: [[TMP44:%.*]] = load i32, i32* [[A]], align 4
// CHECK13-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4
// CHECK13-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4
// CHECK13-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4
// CHECK13-NEXT: store i32 [[TMP45]], i32* [[DOTOMP_IV71]], align 4
// CHECK13-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]]
// CHECK13: omp.inner.for.cond73:
// CHECK13-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15:![0-9]+]]
// CHECK13-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK13-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP46]], [[TMP47]]
// CHECK13-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]]
// CHECK13: omp.inner.for.body75:
// CHECK13-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK13-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP48]], 1
// CHECK13-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]]
// CHECK13-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8
// CHECK13-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group [[ACC_GRP15]]
// CHECK13-NEXT: [[TMP49:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK13-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP49]], 1
// CHECK13-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i64 0, i64 2
// CHECK13-NEXT: [[TMP50:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK13-NEXT: [[CONV80:%.*]] = fpext float [[TMP50]] to double
// CHECK13-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00
// CHECK13-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float
// CHECK13-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK13-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i64 3
// CHECK13-NEXT: [[TMP51:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP15]]
// CHECK13-NEXT: [[CONV84:%.*]] = fpext float [[TMP51]] to double
// CHECK13-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 //
CHECK13-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float // CHECK13-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i64 0, i64 1 // CHECK13-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i64 0, i64 2 // CHECK13-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: [[ADD89:%.*]] = fadd double [[TMP52]], 1.000000e+00 // CHECK13-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: [[TMP53:%.*]] = mul nsw i64 1, [[TMP4]] // CHECK13-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i64 [[TMP53]] // CHECK13-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i64 3 // CHECK13-NEXT: [[TMP54:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: [[ADD92:%.*]] = fadd double [[TMP54]], 1.000000e+00 // CHECK13-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 // CHECK13-NEXT: [[TMP55:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP55]], 1 // CHECK13-NEXT: store i64 [[ADD93]], i64* [[X]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 // CHECK13-NEXT: [[TMP56:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: [[CONV94:%.*]] = sext i8 [[TMP56]] to i32 // CHECK13-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 // CHECK13-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 // CHECK13-NEXT: store i8 [[CONV96]], i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] // CHECK13: omp.body.continue97: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] // CHECK13: omp.inner.for.inc98: // CHECK13-NEXT: [[TMP57:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP57]], 1 // CHECK13-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP15]] // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP16:![0-9]+]] // CHECK13: omp.inner.for.end100: // CHECK13-NEXT: store i8 96, i8* [[IT72]], align 1 // CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[A]], align 4 // CHECK13-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 // CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) // CHECK13-NEXT: ret i32 [[TMP58]] // // // CHECK13-LABEL: define {{[^@]+}}@_Z3bari // CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 // CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK13-NEXT: store i32 0, i32* [[A]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3fooi(i32 noundef signext [[TMP0]]) // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 // CHECK13-NEXT: 
[[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK13-NEXT: store i32 [[ADD]], i32* [[A]], align 4 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK13-NEXT: [[CALL1:%.*]] = call noundef signext i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 8 dereferenceable(8) [[S]], i32 noundef signext [[TMP2]]) // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK13-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK13-NEXT: [[CALL3:%.*]] = call noundef signext i32 @_ZL7fstatici(i32 noundef signext [[TMP4]]) // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 // CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK13-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK13-NEXT: [[CALL5:%.*]] = call noundef signext i32 @_Z9ftemplateIiET_i(i32 noundef signext [[TMP6]]) // CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 // CHECK13-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK13-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 // CHECK13-NEXT: ret i32 [[TMP8]] // // // CHECK13-LABEL: define {{[^@]+}}@_ZN2S12r1Ei // CHECK13-SAME: (%struct.S1* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[B:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 // CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 // CHECK13-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK13-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK13-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK13-NEXT: store i32 [[ADD]], i32* [[B]], align 4 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK13-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 // CHECK13-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() // CHECK13-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 // CHECK13-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] // CHECK13-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 // CHECK13-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK13-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 60 // CHECK13-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK13-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK13-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK13-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK13-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK13-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 // CHECK13-NEXT: [[TMP7:%.*]] = load i8, 
i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK13-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK13-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK13: omp_if.then: // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: // CHECK13-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]] // CHECK13-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP8]], [[TMP9]] // CHECK13-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: // CHECK13-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: [[MUL:%.*]] = mul i64 [[TMP10]], 400 // CHECK13-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK13-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP11]] to double // CHECK13-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK13-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK13-NEXT: store double [[ADD3]], double* [[A]], align 8, !nontemporal !19, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK13-NEXT: [[TMP12:%.*]] = load double, double* [[A4]], align 8, !nontemporal !19, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: [[INC:%.*]] = fadd double [[TMP12]], 1.000000e+00 // CHECK13-NEXT: store double [[INC]], double* [[A4]], align 8, !nontemporal !19, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK13-NEXT: [[TMP13:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP13]] // CHECK13-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 // CHECK13-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: [[ADD7:%.*]] = add i64 [[TMP14]], 1 // CHECK13-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: br label [[OMP_IF_END:%.*]] // CHECK13: omp_if.else: // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] // CHECK13: omp.inner.for.cond8: // CHECK13-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK13-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK13-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP15]], [[TMP16]] // CHECK13-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]] // CHECK13: omp.inner.for.body10: // CHECK13-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK13-NEXT: [[MUL11:%.*]] = mul i64 [[TMP17]], 400 // CHECK13-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]] // CHECK13-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8 // 
CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[B]], align 4 // CHECK13-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP18]] to double // CHECK13-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00 // CHECK13-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK13-NEXT: store double [[ADD14]], double* [[A15]], align 8 // CHECK13-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK13-NEXT: [[TMP19:%.*]] = load double, double* [[A16]], align 8 // CHECK13-NEXT: [[INC17:%.*]] = fadd double [[TMP19]], 1.000000e+00 // CHECK13-NEXT: store double [[INC17]], double* [[A16]], align 8 // CHECK13-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16 // CHECK13-NEXT: [[TMP20:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK13-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP20]] // CHECK13-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i64 1 // CHECK13-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]] // CHECK13: omp.body.continue21: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]] // CHECK13: omp.inner.for.inc22: // CHECK13-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK13-NEXT: [[ADD23:%.*]] = add i64 [[TMP21]], 1 // CHECK13-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP22:![0-9]+]] // CHECK13: omp.inner.for.end24: // CHECK13-NEXT: br label [[OMP_IF_END]] // CHECK13: omp_if.end: // CHECK13-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK13-NEXT: [[TMP22:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK13-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP22]] // CHECK13-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i64 1 // CHECK13-NEXT: [[TMP23:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2 // CHECK13-NEXT: [[CONV27:%.*]] = sext i16 [[TMP23]] to i32 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[B]], align 4 // CHECK13-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP24]] // CHECK13-NEXT: [[TMP25:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 // CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP25]]) // CHECK13-NEXT: ret i32 [[ADD28]] // // // CHECK13-LABEL: define {{[^@]+}}@_ZL7fstatici // CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK13-NEXT: [[AAA:%.*]] = alloca i8, align 1 // CHECK13-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK13-NEXT: store i32 0, i32* [[A]], align 4 // CHECK13-NEXT: store i16 0, i16* [[AA]], align 2 // CHECK13-NEXT: store i8 0, i8* [[AAA]], align 1 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK13-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 // CHECK13-NEXT: ret i32 [[TMP0]] // // // CHECK13-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i // CHECK13-SAME: (i32 noundef signext [[N:%.*]]) #[[ATTR0]] comdat { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[A:%.*]] 
= alloca i32, align 4 // CHECK13-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK13-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 // CHECK13-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[I:%.*]] = alloca i64, align 8 // CHECK13-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK13-NEXT: store i32 0, i32* [[A]], align 4 // CHECK13-NEXT: store i16 0, i16* [[AA]], align 2 // CHECK13-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK13-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK13-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK13: omp.inner.for.cond: // CHECK13-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP24:![0-9]+]] // CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK13: omp.inner.for.body: // CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK13-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK13-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK13-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK13-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 // CHECK13-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i64 0, i64 2 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 // CHECK13-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK13: omp.body.continue: // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 // CHECK13-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK13: omp.inner.for.end: // CHECK13-NEXT: store i64 11, i64* [[I]], align 8 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 // CHECK13-NEXT: ret i32 [[TMP8]] // // // CHECK15-LABEL: define {{[^@]+}}@_Z7get_valv // CHECK15-SAME: () #[[ATTR0:[0-9]+]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: ret i64 0 // // // CHECK15-LABEL: define {{[^@]+}}@_Z3fooi // CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: 
[[N_ADDR:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK15-NEXT: [[B:%.*]] = alloca [10 x float], align 4 // CHECK15-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 // CHECK15-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 // CHECK15-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[K:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[I7:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[K8:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[LIN:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[_TMP20:%.*]] = alloca i64, align 4 // CHECK15-NEXT: [[DOTOMP_LB21:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[DOTOMP_UB22:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[DOTOMP_IV23:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[DOTLINEAR_START24:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTLINEAR_START25:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[LIN27:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[A28:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[_TMP49:%.*]] = alloca i16, align 2 // CHECK15-NEXT: [[DOTOMP_LB50:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_UB51:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_IV52:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[IT53:%.*]] = alloca i16, align 2 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[_TMP68:%.*]] = alloca i8, align 1 // CHECK15-NEXT: [[DOTOMP_LB69:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_UB70:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_IV71:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[IT72:%.*]] = alloca i8, align 1 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 // CHECK15-NEXT: store i16 0, i16* [[AA]], align 2 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() // CHECK15-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP0]], align 4 // CHECK15-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 5, [[TMP2]] // CHECK15-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP3]], align 8 // CHECK15-NEXT: store i32 [[TMP2]], i32* [[__VLA_EXPR1]], align 4 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: // 
CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3:![0-9]+]] // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK15-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP3]] // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i32 33, i32* [[I]], align 4 // CHECK15-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() // CHECK15-NEXT: store i64 [[CALL]], i64* [[K]], align 8 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 // CHECK15-NEXT: store i32 8, i32* [[DOTOMP_UB5]], align 4 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 // CHECK15-NEXT: store i32 [[TMP9]], i32* [[DOTOMP_IV6]], align 4 // CHECK15-NEXT: [[TMP10:%.*]] = load i64, i64* [[K]], align 8 // CHECK15-NEXT: store i64 [[TMP10]], i64* [[DOTLINEAR_START]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9:%.*]] // CHECK15: omp.inner.for.cond9: // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7:![0-9]+]] // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: [[CMP10:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] // CHECK15-NEXT: br i1 [[CMP10]], label [[OMP_INNER_FOR_BODY11:%.*]], label [[OMP_INNER_FOR_END19:%.*]] // CHECK15: omp.inner.for.body11: // CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: [[MUL12:%.*]] = mul nsw i32 [[TMP13]], 1 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL12]] // CHECK15-NEXT: store i32 [[SUB]], i32* [[I7]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP15]], 3 // CHECK15-NEXT: [[CONV:%.*]] = sext i32 [[MUL13]] to i64 // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP14]], [[CONV]] // CHECK15-NEXT: store i64 [[ADD14]], i64* [[K8]], align 8, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP16]], 1 // CHECK15-NEXT: store i32 [[ADD15]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] // CHECK15: omp.body.continue16: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] // CHECK15: omp.inner.for.inc17: // CHECK15-NEXT: 
[[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP17]], 1 // CHECK15-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group [[ACC_GRP7]] // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND9]], !llvm.loop [[LOOP8:![0-9]+]] // CHECK15: omp.inner.for.end19: // CHECK15-NEXT: store i32 1, i32* [[I7]], align 4 // CHECK15-NEXT: [[TMP18:%.*]] = load i64, i64* [[K8]], align 8 // CHECK15-NEXT: store i64 [[TMP18]], i64* [[K]], align 8 // CHECK15-NEXT: store i32 12, i32* [[LIN]], align 4 // CHECK15-NEXT: store i64 0, i64* [[DOTOMP_LB21]], align 8 // CHECK15-NEXT: store i64 3, i64* [[DOTOMP_UB22]], align 8 // CHECK15-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_LB21]], align 8 // CHECK15-NEXT: store i64 [[TMP19]], i64* [[DOTOMP_IV23]], align 8 // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[LIN]], align 4 // CHECK15-NEXT: store i32 [[TMP20]], i32* [[DOTLINEAR_START24]], align 4 // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[A]], align 4 // CHECK15-NEXT: store i32 [[TMP21]], i32* [[DOTLINEAR_START25]], align 4 // CHECK15-NEXT: [[CALL26:%.*]] = call noundef i64 @_Z7get_valv() // CHECK15-NEXT: store i64 [[CALL26]], i64* [[DOTLINEAR_STEP]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND29:%.*]] // CHECK15: omp.inner.for.cond29: // CHECK15-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10:![0-9]+]] // CHECK15-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB22]], align 8, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[CMP30:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK15-NEXT: br i1 [[CMP30]], label [[OMP_INNER_FOR_BODY31:%.*]], label [[OMP_INNER_FOR_END48:%.*]] // CHECK15: omp.inner.for.body31: // CHECK15-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[MUL32:%.*]] = mul i64 [[TMP24]], 400 // CHECK15-NEXT: [[SUB33:%.*]] = sub i64 2000, [[MUL32]] // CHECK15-NEXT: store i64 [[SUB33]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTLINEAR_START24]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[CONV34:%.*]] = sext i32 [[TMP25]] to i64 // CHECK15-NEXT: [[TMP26:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[MUL35:%.*]] = mul i64 [[TMP26]], [[TMP27]] // CHECK15-NEXT: [[ADD36:%.*]] = add i64 [[CONV34]], [[MUL35]] // CHECK15-NEXT: [[CONV37:%.*]] = trunc i64 [[ADD36]] to i32 // CHECK15-NEXT: store i32 [[CONV37]], i32* [[LIN27]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTLINEAR_START25]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[CONV38:%.*]] = sext i32 [[TMP28]] to i64 // CHECK15-NEXT: [[TMP29:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[TMP30:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[MUL39:%.*]] = mul i64 [[TMP29]], [[TMP30]] // CHECK15-NEXT: [[ADD40:%.*]] = add i64 [[CONV38]], [[MUL39]] // CHECK15-NEXT: [[CONV41:%.*]] = trunc i64 [[ADD40]] to i32 // CHECK15-NEXT: store i32 [[CONV41]], i32* [[A28]], align 4, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[TMP31:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: 
[[CONV42:%.*]] = sext i16 [[TMP31]] to i32 // CHECK15-NEXT: [[ADD43:%.*]] = add nsw i32 [[CONV42]], 1 // CHECK15-NEXT: [[CONV44:%.*]] = trunc i32 [[ADD43]] to i16 // CHECK15-NEXT: store i16 [[CONV44]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE45:%.*]] // CHECK15: omp.body.continue45: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC46:%.*]] // CHECK15: omp.inner.for.inc46: // CHECK15-NEXT: [[TMP32:%.*]] = load i64, i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: [[ADD47:%.*]] = add i64 [[TMP32]], 1 // CHECK15-NEXT: store i64 [[ADD47]], i64* [[DOTOMP_IV23]], align 8, !llvm.access.group [[ACC_GRP10]] // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND29]], !llvm.loop [[LOOP11:![0-9]+]] // CHECK15: omp.inner.for.end48: // CHECK15-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* [[LIN27]], align 4 // CHECK15-NEXT: store i32 [[TMP33]], i32* [[LIN]], align 4 // CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[A28]], align 4 // CHECK15-NEXT: store i32 [[TMP34]], i32* [[A]], align 4 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB50]], align 4 // CHECK15-NEXT: store i32 3, i32* [[DOTOMP_UB51]], align 4 // CHECK15-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_LB50]], align 4 // CHECK15-NEXT: store i32 [[TMP35]], i32* [[DOTOMP_IV52]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND54:%.*]] // CHECK15: omp.inner.for.cond54: // CHECK15-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13:![0-9]+]] // CHECK15-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTOMP_UB51]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK15-NEXT: [[CMP55:%.*]] = icmp sle i32 [[TMP36]], [[TMP37]] // CHECK15-NEXT: br i1 [[CMP55]], label [[OMP_INNER_FOR_BODY56:%.*]], label [[OMP_INNER_FOR_END67:%.*]] // CHECK15: omp.inner.for.body56: // CHECK15-NEXT: [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK15-NEXT: [[MUL57:%.*]] = mul nsw i32 [[TMP38]], 4 // CHECK15-NEXT: [[ADD58:%.*]] = add nsw i32 6, [[MUL57]] // CHECK15-NEXT: [[CONV59:%.*]] = trunc i32 [[ADD58]] to i16 // CHECK15-NEXT: store i16 [[CONV59]], i16* [[IT53]], align 2, !llvm.access.group [[ACC_GRP13]] // CHECK15-NEXT: [[TMP39:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK15-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP39]], 1 // CHECK15-NEXT: store i32 [[ADD60]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK15-NEXT: [[TMP40:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] // CHECK15-NEXT: [[CONV61:%.*]] = sext i16 [[TMP40]] to i32 // CHECK15-NEXT: [[ADD62:%.*]] = add nsw i32 [[CONV61]], 1 // CHECK15-NEXT: [[CONV63:%.*]] = trunc i32 [[ADD62]] to i16 // CHECK15-NEXT: store i16 [[CONV63]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP13]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE64:%.*]] // CHECK15: omp.body.continue64: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC65:%.*]] // CHECK15: omp.inner.for.inc65: // CHECK15-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK15-NEXT: [[ADD66:%.*]] = add nsw i32 [[TMP41]], 1 // CHECK15-NEXT: store i32 [[ADD66]], i32* [[DOTOMP_IV52]], align 4, !llvm.access.group [[ACC_GRP13]] // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND54]], !llvm.loop [[LOOP14:![0-9]+]] // CHECK15: omp.inner.for.end67: // CHECK15-NEXT: store i16 22, i16* [[IT53]], align 2 // CHECK15-NEXT: [[TMP42:%.*]] = load i32, i32* [[A]], 
align 4 // CHECK15-NEXT: store i32 [[TMP42]], i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB69]], align 4 // CHECK15-NEXT: store i32 25, i32* [[DOTOMP_UB70]], align 4 // CHECK15-NEXT: [[TMP43:%.*]] = load i32, i32* [[DOTOMP_LB69]], align 4 // CHECK15-NEXT: store i32 [[TMP43]], i32* [[DOTOMP_IV71]], align 4 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND73:%.*]] // CHECK15: omp.inner.for.cond73: // CHECK15-NEXT: [[TMP44:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16:![0-9]+]] // CHECK15-NEXT: [[TMP45:%.*]] = load i32, i32* [[DOTOMP_UB70]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[CMP74:%.*]] = icmp sle i32 [[TMP44]], [[TMP45]] // CHECK15-NEXT: br i1 [[CMP74]], label [[OMP_INNER_FOR_BODY75:%.*]], label [[OMP_INNER_FOR_END100:%.*]] // CHECK15: omp.inner.for.body75: // CHECK15-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[MUL76:%.*]] = mul nsw i32 [[TMP46]], 1 // CHECK15-NEXT: [[SUB77:%.*]] = sub nsw i32 122, [[MUL76]] // CHECK15-NEXT: [[CONV78:%.*]] = trunc i32 [[SUB77]] to i8 // CHECK15-NEXT: store i8 [[CONV78]], i8* [[IT72]], align 1, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[TMP47:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[ADD79:%.*]] = add nsw i32 [[TMP47]], 1 // CHECK15-NEXT: store i32 [[ADD79]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[B]], i32 0, i32 2 // CHECK15-NEXT: [[TMP48:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[CONV80:%.*]] = fpext float [[TMP48]] to double // CHECK15-NEXT: [[ADD81:%.*]] = fadd double [[CONV80]], 1.000000e+00 // CHECK15-NEXT: [[CONV82:%.*]] = fptrunc double [[ADD81]] to float // CHECK15-NEXT: store float [[CONV82]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[ARRAYIDX83:%.*]] = getelementptr inbounds float, float* [[VLA]], i32 3 // CHECK15-NEXT: [[TMP49:%.*]] = load float, float* [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[CONV84:%.*]] = fpext float [[TMP49]] to double // CHECK15-NEXT: [[ADD85:%.*]] = fadd double [[CONV84]], 1.000000e+00 // CHECK15-NEXT: [[CONV86:%.*]] = fptrunc double [[ADD85]] to float // CHECK15-NEXT: store float [[CONV86]], float* [[ARRAYIDX83]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[ARRAYIDX87:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[C]], i32 0, i32 1 // CHECK15-NEXT: [[ARRAYIDX88:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX87]], i32 0, i32 2 // CHECK15-NEXT: [[TMP50:%.*]] = load double, double* [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[ADD89:%.*]] = fadd double [[TMP50]], 1.000000e+00 // CHECK15-NEXT: store double [[ADD89]], double* [[ARRAYIDX88]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[TMP51:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK15-NEXT: [[ARRAYIDX90:%.*]] = getelementptr inbounds double, double* [[VLA1]], i32 [[TMP51]] // CHECK15-NEXT: [[ARRAYIDX91:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX90]], i32 3 // CHECK15-NEXT: [[TMP52:%.*]] = load double, double* [[ARRAYIDX91]], align 8, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[ADD92:%.*]] = fadd double [[TMP52]], 1.000000e+00 // CHECK15-NEXT: store double [[ADD92]], double* [[ARRAYIDX91]], 
align 8, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 0 // CHECK15-NEXT: [[TMP53:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[ADD93:%.*]] = add nsw i64 [[TMP53]], 1 // CHECK15-NEXT: store i64 [[ADD93]], i64* [[X]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[D]], i32 0, i32 1 // CHECK15-NEXT: [[TMP54:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[CONV94:%.*]] = sext i8 [[TMP54]] to i32 // CHECK15-NEXT: [[ADD95:%.*]] = add nsw i32 [[CONV94]], 1 // CHECK15-NEXT: [[CONV96:%.*]] = trunc i32 [[ADD95]] to i8 // CHECK15-NEXT: store i8 [[CONV96]], i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE97:%.*]] // CHECK15: omp.body.continue97: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC98:%.*]] // CHECK15: omp.inner.for.inc98: // CHECK15-NEXT: [[TMP55:%.*]] = load i32, i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: [[ADD99:%.*]] = add nsw i32 [[TMP55]], 1 // CHECK15-NEXT: store i32 [[ADD99]], i32* [[DOTOMP_IV71]], align 4, !llvm.access.group [[ACC_GRP16]] // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND73]], !llvm.loop [[LOOP17:![0-9]+]] // CHECK15: omp.inner.for.end100: // CHECK15-NEXT: store i8 96, i8* [[IT72]], align 1 // CHECK15-NEXT: [[TMP56:%.*]] = load i32, i32* [[A]], align 4 // CHECK15-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 // CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) // CHECK15-NEXT: ret i32 [[TMP56]] // // // CHECK15-LABEL: define {{[^@]+}}@_Z3bari // CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z3fooi(i32 noundef [[TMP0]]) // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] // CHECK15-NEXT: store i32 [[ADD]], i32* [[A]], align 4 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[CALL1:%.*]] = call noundef i32 @_ZN2S12r1Ei(%struct.S1* noundef nonnull align 4 dereferenceable(8) [[S]], i32 noundef [[TMP2]]) // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] // CHECK15-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[CALL3:%.*]] = call noundef i32 @_ZL7fstatici(i32 noundef [[TMP4]]) // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 // CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] // CHECK15-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[CALL5:%.*]] = call noundef i32 @_Z9ftemplateIiET_i(i32 noundef [[TMP6]]) // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 // CHECK15-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] // CHECK15-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], 
align 4 // CHECK15-NEXT: ret i32 [[TMP8]] // // // CHECK15-LABEL: define {{[^@]+}}@_ZN2S12r1Ei // CHECK15-SAME: (%struct.S1* noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[N:%.*]]) #[[ATTR0]] comdat align 2 { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[B:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 // CHECK15-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 // CHECK15-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK15-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 // CHECK15-NEXT: store i32 [[ADD]], i32* [[B]], align 4 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() // CHECK15-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] // CHECK15-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 // CHECK15-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 60 // CHECK15-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8 // CHECK15-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK15-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK15-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK15-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK15-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 // CHECK15-NEXT: [[TMP6:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 // CHECK15-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP6]] to i1 // CHECK15-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK15: omp_if.then: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: // CHECK15-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19:![0-9]+]] // CHECK15-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: [[CMP2:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK15-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK15-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK15-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[B]], align 4, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP10]] to double // CHECK15-NEXT: [[ADD3:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK15-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 // 
CHECK15-NEXT: store double [[ADD3]], double* [[A]], align 4, !nontemporal !20, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK15-NEXT: [[TMP11:%.*]] = load double, double* [[A4]], align 4, !nontemporal !20, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: [[INC:%.*]] = fadd double [[TMP11]], 1.000000e+00 // CHECK15-NEXT: store double [[INC]], double* [[A4]], align 4, !nontemporal !20, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK15-NEXT: [[TMP12:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP12]] // CHECK15-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 // CHECK15-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: [[ADD7:%.*]] = add i64 [[TMP13]], 1 // CHECK15-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP19]] // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: br label [[OMP_IF_END:%.*]] // CHECK15: omp_if.else: // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] // CHECK15: omp.inner.for.cond8: // CHECK15-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK15-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK15-NEXT: [[CMP9:%.*]] = icmp ule i64 [[TMP14]], [[TMP15]] // CHECK15-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END24:%.*]] // CHECK15: omp.inner.for.body10: // CHECK15-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK15-NEXT: [[MUL11:%.*]] = mul i64 [[TMP16]], 400 // CHECK15-NEXT: [[SUB12:%.*]] = sub i64 2000, [[MUL11]] // CHECK15-NEXT: store i64 [[SUB12]], i64* [[IT]], align 8 // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[B]], align 4 // CHECK15-NEXT: [[CONV13:%.*]] = sitofp i32 [[TMP17]] to double // CHECK15-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.500000e+00 // CHECK15-NEXT: [[A15:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK15-NEXT: store double [[ADD14]], double* [[A15]], align 4 // CHECK15-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[THIS1]], i32 0, i32 0 // CHECK15-NEXT: [[TMP18:%.*]] = load double, double* [[A16]], align 4 // CHECK15-NEXT: [[INC17:%.*]] = fadd double [[TMP18]], 1.000000e+00 // CHECK15-NEXT: store double [[INC17]], double* [[A16]], align 4 // CHECK15-NEXT: [[CONV18:%.*]] = fptosi double [[INC17]] to i16 // CHECK15-NEXT: [[TMP19:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP19]] // CHECK15-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX19]], i32 1 // CHECK15-NEXT: store i16 [[CONV18]], i16* [[ARRAYIDX20]], align 2 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE21:%.*]] // CHECK15: omp.body.continue21: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC22:%.*]] // CHECK15: omp.inner.for.inc22: // CHECK15-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK15-NEXT: [[ADD23:%.*]] = add i64 [[TMP20]], 
1 // CHECK15-NEXT: store i64 [[ADD23]], i64* [[DOTOMP_IV]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP23:![0-9]+]] // CHECK15: omp.inner.for.end24: // CHECK15-NEXT: br label [[OMP_IF_END]] // CHECK15: omp_if.end: // CHECK15-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK15-NEXT: [[TMP21:%.*]] = mul nsw i32 1, [[TMP1]] // CHECK15-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP21]] // CHECK15-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1 // CHECK15-NEXT: [[TMP22:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2 // CHECK15-NEXT: [[CONV27:%.*]] = sext i16 [[TMP22]] to i32 // CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[B]], align 4 // CHECK15-NEXT: [[ADD28:%.*]] = add nsw i32 [[CONV27]], [[TMP23]] // CHECK15-NEXT: [[TMP24:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 // CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP24]]) // CHECK15-NEXT: ret i32 [[ADD28]] // // // CHECK15-LABEL: define {{[^@]+}}@_ZL7fstatici // CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK15-NEXT: [[AAA:%.*]] = alloca i8, align 1 // CHECK15-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 // CHECK15-NEXT: store i16 0, i16* [[AA]], align 2 // CHECK15-NEXT: store i8 0, i8* [[AAA]], align 1 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK15-NEXT: store i32 429496720, i32* [[DOTOMP_UB]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 // CHECK15-NEXT: ret i32 [[TMP0]] // // // CHECK15-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i // CHECK15-SAME: (i32 noundef [[N:%.*]]) #[[ATTR0]] comdat { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[A:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[AA:%.*]] = alloca i16, align 2 // CHECK15-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 // CHECK15-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK15-NEXT: [[I:%.*]] = alloca i64, align 8 // CHECK15-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 0, i32* [[A]], align 4 // CHECK15-NEXT: store i16 0, i16* [[AA]], align 2 // CHECK15-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK15-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 // CHECK15-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK15-NEXT: store i64 [[TMP0]], i64* [[DOTOMP_IV]], align 8 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK15: omp.inner.for.cond: // CHECK15-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25:![0-9]+]] // CHECK15-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i64 [[TMP1]], [[TMP2]] // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_IV]], 
align 8, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP3]], 3 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK15-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[A]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 // CHECK15-NEXT: store i32 [[ADD1]], i32* [[A]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: [[TMP5:%.*]] = load i16, i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: [[CONV:%.*]] = sext i16 [[TMP5]] to i32 // CHECK15-NEXT: [[ADD2:%.*]] = add nsw i32 [[CONV]], 1 // CHECK15-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD2]] to i16 // CHECK15-NEXT: store i16 [[CONV3]], i16* [[AA]], align 2, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[B]], i32 0, i32 2 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP6]], 1 // CHECK15-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK15: omp.body.continue: // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP7]], 1 // CHECK15-NEXT: store i64 [[ADD5]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP25]] // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] // CHECK15: omp.inner.for.end: // CHECK15-NEXT: store i64 11, i64* [[I]], align 8 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 // CHECK15-NEXT: ret i32 [[TMP8]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 // CHECK17-SAME: () #[[ATTR0:[0-9]+]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined. 
// CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: // CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] // CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK17-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK17-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK17-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: // CHECK17-NEXT: store i32 33, i32* [[I]], align 4 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK17: .omp.final.done: // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 // CHECK17-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK17-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4 // CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4 // CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..1 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[LIN4:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[A5:%.*]] = alloca i32, align 4 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4 // CHECK17-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4 // CHECK17-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 // CHECK17-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK17-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 // CHECK17-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK17-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 // CHECK17-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK17-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: br 
label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK17-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK17-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: // CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17:![0-9]+]] // CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK17-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 // CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK17-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK17-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 // CHECK17-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 // CHECK17-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK17-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] // CHECK17-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 // CHECK17-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 // CHECK17-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 // CHECK17-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 // CHECK17-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 // CHECK17-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) // CHECK17-NEXT: 
[[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK17-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: // CHECK17-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK17: .omp.final.done: // CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK17-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK17: .omp.linear.pu: // CHECK17-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4 // CHECK17-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4 // CHECK17-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 4 // CHECK17-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK17: .omp.linear.pu.done: // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@_Z7get_valv // CHECK17-SAME: () #[[ATTR3:[0-9]+]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: ret i64 0 // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 // CHECK17-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK17-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..2 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[TMP:%.*]] = alloca i16, align 2 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[IT:%.*]] = alloca i16, align 2 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: // CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]] // CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // 
CHECK17-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 // CHECK17-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK17-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK17-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK17-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 // CHECK17-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 // CHECK17-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK17-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK17-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: // CHECK17-NEXT: store i16 22, i16* [[IT]], align 2 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK17: .omp.final.done: // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 // CHECK17-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 // CHECK17-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 // CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 // CHECK17-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 // CHECK17-NEXT: store [5 x [10 x 
double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 // CHECK17-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 // CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 // CHECK17-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 // CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* // CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 // CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4 // CHECK17-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 // CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..3 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 // CHECK17-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 // CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 // CHECK17-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[IT:%.*]] = alloca i8, align 1 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 // CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 // CHECK17-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 // CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 // CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], 
align 8 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 // CHECK17-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 // CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4 // CHECK17-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 // CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK17: omp.dispatch.cond: // CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK17-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 // CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 // CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK17: omp.dispatch.body: // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: // CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]] // CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK17-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK17-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 // CHECK17-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK17-NEXT: store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 // CHECK17-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: 
[[CONV9:%.*]] = fpext float [[TMP20]] to double // CHECK17-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 // CHECK17-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float // CHECK17-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 // CHECK17-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double // CHECK17-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 // CHECK17-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float // CHECK17-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 // CHECK17-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 // CHECK17-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK17-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK17-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] // CHECK17-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 // CHECK17-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK17-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 // CHECK17-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK17-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 // CHECK17-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 // CHECK17-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 // CHECK17-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 // CHECK17-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK17-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK17: omp.dispatch.inc: // CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK17-NEXT: store 
i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 // CHECK17-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK17-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK17-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK17: omp.dispatch.end: // CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) // CHECK17-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK17-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: // CHECK17-NEXT: store i8 96, i8* [[IT]], align 1 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK17: .omp.final.done: // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 // CHECK17-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 // CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2 // CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK17-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1 // CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK17-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 // CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216 // CHECK17-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 // CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK17-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 // CHECK17-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK17-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 // CHECK17-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: // CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK17-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: [[TMP8:%.*]] = load 
i64, i64* [[DOTOMP_LB]], align 8 // CHECK17-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: // CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26:![0-9]+]] // CHECK17-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK17-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK17-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double // CHECK17-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK17-NEXT: store double [[ADD]], double* [[A]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK17-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK17-NEXT: store double [[INC]], double* [[A5]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK17-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]] // CHECK17-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 // CHECK17-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1 // CHECK17-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) // CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK17-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: // CHECK17-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK17: .omp.final.done: // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 // CHECK17-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK17-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2 // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // // CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[I:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 // CHECK17-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK17-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 // CHECK17-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* 
@[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK17: cond.true: // CHECK17-NEXT: br label [[COND_END:%.*]] // CHECK17: cond.false: // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: br label [[COND_END]] // CHECK17: cond.end: // CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK17-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK17-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK17: omp.inner.for.cond: // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29:![0-9]+]] // CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK17-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK17-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 // CHECK17-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 // CHECK17-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK17-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK17: omp.body.continue: // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK17-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP29]] // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK17: omp.inner.for.end: // CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK17: omp.loop.exit: // CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) // CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK17-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK17-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], 
label [[DOTOMP_FINAL_DONE:%.*]] // CHECK17: .omp.final.then: // CHECK17-NEXT: store i64 11, i64* [[I]], align 8 // CHECK17-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK17: .omp.final.done: // CHECK17-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 // CHECK19-SAME: () #[[ATTR0:[0-9]+]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined. // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK19-NEXT: [[ADD:%.*]] = 
add nsw i32 3, [[MUL]] // CHECK19-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK19-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK19-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: // CHECK19-NEXT: store i32 33, i32* [[I]], align 4 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK19: .omp.final.done: // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 // CHECK19-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK19-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK19-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..1 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK19-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[LIN2:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[A3:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4 // CHECK19-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK19-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 // CHECK19-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK19-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 // CHECK19-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK19-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK19-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: // CHECK19-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] 
] // CHECK19-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK19-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: // CHECK19-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]] // CHECK19-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK19-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK19-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64 // CHECK19-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK19-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]] // CHECK19-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32 // CHECK19-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64 // CHECK19-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK19-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]] // CHECK19-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 // CHECK19-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32 // CHECK19-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 // CHECK19-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16 // CHECK19-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1 // CHECK19-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK19-NEXT: br 
i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: // CHECK19-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK19: .omp.final.done: // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK19-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK19: .omp.linear.pu: // CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4 // CHECK19-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4 // CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4 // CHECK19-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK19: .omp.linear.pu.done: // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@_Z7get_valv // CHECK19-SAME: () #[[ATTR3:[0-9]+]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: ret i64 0 // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 // CHECK19-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK19-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..2 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i16, align 2 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[IT:%.*]] = alloca i16, align 2 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK19-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16 // 
CHECK19-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group [[ACC_GRP21]] // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK19-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK19-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP21]] // CHECK19-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32 // CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 // CHECK19-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 // CHECK19-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP21]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK19-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK19-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: // CHECK19-NEXT: store i16 22, i16* [[IT]], align 2 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK19: .omp.final.done: // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 // CHECK19-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 // CHECK19-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 // CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 // CHECK19-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 // CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 
4 // CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 // CHECK19-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 // CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 // CHECK19-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..3 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 // CHECK19-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 // CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 // CHECK19-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, 
align 4 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[IT:%.*]] = alloca i8, align 1 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 // CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 // CHECK19-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 // CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 // CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 // CHECK19-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 // CHECK19-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 // CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK19: omp.dispatch.cond: // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK19-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 // CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label 
[[OMP_DISPATCH_END:%.*]] // CHECK19: omp.dispatch.body: // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: // CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] // CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK19-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK19-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK19-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2 // CHECK19-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK19-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK19-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK19-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 // CHECK19-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK19-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK19-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK19-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 // CHECK19-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 // CHECK19-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK19-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK19-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] // CHECK19-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 // CHECK19-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK19-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 // CHECK19-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // 
CHECK19-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 // CHECK19-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK19-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK19-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 // CHECK19-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK19-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK19: omp.dispatch.inc: // CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK19-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 // CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK19-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK19-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK19: omp.dispatch.end: // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) // CHECK19-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK19-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: // CHECK19-NEXT: store i8 96, i8* [[IT]], align 1 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK19: .omp.final.done: // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 // CHECK19-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // 
CHECK19-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK19-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1 // CHECK19-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK19-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216 // CHECK19-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 // CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** 
[[THIS_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK19-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 // CHECK19-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK19-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 // CHECK19-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK19-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 // CHECK19-NEXT: br i1 [[CMP]], label 
[[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: // CHECK19-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] // CHECK19-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK19-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: // CHECK19-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27:![0-9]+]] // CHECK19-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] // CHECK19-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 // CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK19-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double // CHECK19-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK19-NEXT: store double [[ADD]], double* [[A]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK19-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 // CHECK19-NEXT: store double [[INC]], double* [[A4]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 // CHECK19-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]] // CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 // CHECK19-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 // CHECK19-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) // CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK19-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: // CHECK19-NEXT: store i64 
400, i64* [[IT]], align 8 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK19: .omp.final.done: // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 // CHECK19-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[I:%.*]] = alloca i64, align 8 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK19-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK19-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK19-NEXT: 
store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 // CHECK19-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK19-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK19: cond.true: // CHECK19-NEXT: br label [[COND_END:%.*]] // CHECK19: cond.false: // CHECK19-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: br label [[COND_END]] // CHECK19: cond.end: // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK19-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK19-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK19-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK19: omp.inner.for.cond: // CHECK19-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30:![0-9]+]] // CHECK19-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK19-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK19-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK19-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 // CHECK19-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK19-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK19: omp.body.continue: // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK19-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP30]] // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] // CHECK19: omp.inner.for.end: // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK19: omp.loop.exit: // CHECK19-NEXT: call void 
@__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) // CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK19: .omp.final.then: // CHECK19-NEXT: store i64 11, i64* [[I]], align 8 // CHECK19-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK19: .omp.final.done: // CHECK19-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 // CHECK21-SAME: () #[[ATTR0:[0-9]+]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined. // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK21-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: // CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: // CHECK21-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11:![0-9]+]] // CHECK21-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK21-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK21-NEXT: br i1 [[CMP1]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: // CHECK21-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK21-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: // CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK21-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK21-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP11]] // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK21-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK21-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: // CHECK21-NEXT: store i32 33, i32* [[I]], align 4 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK21: .omp.final.done: // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 // CHECK21-SAME: (i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK21-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* // CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK21-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 // CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 4 // CHECK21-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* // CHECK21-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 // CHECK21-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 4 // CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK21-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 // CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..1 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[LIN:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[LIN4:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[A5:%.*]] = alloca i32, align 4 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK21-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* // CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 4 // CHECK21-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 4 // CHECK21-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 // CHECK21-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK21-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 // CHECK21-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK21-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 // CHECK21-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK21-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: // CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: br 
label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] // CHECK21-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK21-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: // CHECK21-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17:![0-9]+]] // CHECK21-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK21-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: // CHECK21-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK21-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK21-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 // CHECK21-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK21-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] // CHECK21-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 // CHECK21-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 // CHECK21-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK21-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] // CHECK21-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 // CHECK21-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 // CHECK21-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 // CHECK21-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 // CHECK21-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: // CHECK21-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 // CHECK21-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP17]] // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) // CHECK21-NEXT: 
[[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK21-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: // CHECK21-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK21: .omp.final.done: // CHECK21-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK21-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK21: .omp.linear.pu: // CHECK21-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN4]], align 4 // CHECK21-NEXT: store i32 [[TMP22]], i32* [[CONV1]], align 4 // CHECK21-NEXT: [[TMP23:%.*]] = load i32, i32* [[A5]], align 4 // CHECK21-NEXT: store i32 [[TMP23]], i32* [[CONV2]], align 4 // CHECK21-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK21: .omp.linear.pu.done: // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@_Z7get_valv // CHECK21-SAME: () #[[ATTR3:[0-9]+]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: ret i64 0 // // // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 // CHECK21-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR0]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK21-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK21-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 // CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK21-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 2 // CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK21-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 // CHECK21-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..2 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]]) #[[ATTR1]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[TMP:%.*]] = alloca i16, align 2 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[IT:%.*]] = alloca i16, align 2 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK21-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: // CHECK21-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: // CHECK21-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20:![0-9]+]] // CHECK21-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK21-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: // CHECK21-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // 
CHECK21-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 // CHECK21-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK21-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 // CHECK21-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 // CHECK21-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 // CHECK21-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: // CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK21-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP20]] // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK21-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK21-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: // CHECK21-NEXT: store i16 22, i16* [[IT]], align 2 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK21: .omp.final.done: // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 // CHECK21-SAME: (i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 // CHECK21-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 // CHECK21-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 // CHECK21-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 // CHECK21-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 // CHECK21-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK21-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 // CHECK21-NEXT: store [5 x [10 x 
double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK21-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK21-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 // CHECK21-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 // CHECK21-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 // CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK21-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 // CHECK21-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 // CHECK21-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 // CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* // CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK21-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK21-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 // CHECK21-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 4 // CHECK21-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK21-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 // CHECK21-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..3 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 8 dereferenceable(400) [[C:%.*]], i64 noundef [[VLA1:%.*]], i64 noundef [[VLA3:%.*]], double* noundef nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 8 dereferenceable(16) [[D:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 // CHECK21-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 // CHECK21-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 // CHECK21-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 // CHECK21-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[IT:%.*]] = alloca i8, align 1 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 // CHECK21-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK21-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 // CHECK21-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 // CHECK21-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK21-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 // CHECK21-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 // CHECK21-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 // CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK21-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], 
align 8 // CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 // CHECK21-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 // CHECK21-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 // CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 4 // CHECK21-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 // CHECK21-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK21-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK21: omp.dispatch.cond: // CHECK21-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: // CHECK21-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK21-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK21-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 // CHECK21-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 // CHECK21-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK21-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] // CHECK21: omp.dispatch.body: // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: // CHECK21-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23:![0-9]+]] // CHECK21-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK21-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: // CHECK21-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK21-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK21-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 // CHECK21-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK21-NEXT: store i32 [[ADD]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 // CHECK21-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: 
[[CONV9:%.*]] = fpext float [[TMP20]] to double // CHECK21-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 // CHECK21-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float // CHECK21-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 // CHECK21-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double // CHECK21-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 // CHECK21-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float // CHECK21-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 // CHECK21-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 // CHECK21-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK21-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] // CHECK21-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] // CHECK21-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 // CHECK21-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK21-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 // CHECK21-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 // CHECK21-NEXT: store i64 [[ADD22]], i64* [[X]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 // CHECK21-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 // CHECK21-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 // CHECK21-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 // CHECK21-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: // CHECK21-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK21-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP23]] // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK21: omp.dispatch.inc: // CHECK21-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK21-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK21-NEXT: store 
i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 // CHECK21-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK21-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK21-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 // CHECK21-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK21: omp.dispatch.end: // CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) // CHECK21-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK21-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: // CHECK21-NEXT: store i8 96, i8* [[IT]], align 1 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK21: .omp.final.done: // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 // CHECK21-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 // CHECK21-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* // CHECK21-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK21-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4 // CHECK21-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2 // CHECK21-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK21-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2 // CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK21-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 1 // CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK21-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK21-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 // CHECK21-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* // CHECK21-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214 // CHECK21-SAME: (%struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) // CHECK21-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK21-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 // CHECK21-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK21-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK21-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK21-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* // CHECK21-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = 
load i64, i64* [[VLA_ADDR2]], align 8 // CHECK21-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 8 // CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* // CHECK21-NEXT: [[TMP5:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK21-NEXT: [[CONV4:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK21-NEXT: store i32 [[TMP5]], i32* [[CONV4]], align 4 // CHECK21-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 // CHECK21-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV3]], align 1 // CHECK21-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK21-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8* // CHECK21-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK21-NEXT: store i8 [[FROMBOOL]], i8* [[CONV5]], align 1 // CHECK21-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 // CHECK21-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV3]], align 1 // CHECK21-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK21-NEXT: br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK21: omp_if.then: // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) // CHECK21-NEXT: br label [[OMP_IF_END:%.*]] // CHECK21: omp_if.else: // CHECK21-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK21-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK21-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 // CHECK21-NEXT: call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR2:[0-9]+]] // CHECK21-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK21-NEXT: br label [[OMP_IF_END]] // CHECK21: omp_if.end: // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..5 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 // CHECK21-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 
8 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK21-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK21-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 // CHECK21-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK21-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 // CHECK21-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 // CHECK21-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 // CHECK21-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* // CHECK21-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK21-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 // CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* // CHECK21-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK21-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV3]], align 1 // CHECK21-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 // CHECK21-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK21: omp_if.then: // CHECK21-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 // CHECK21-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK21-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: // CHECK21-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK21-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK21-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: // CHECK21-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26:![0-9]+]] // CHECK21-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] // CHECK21-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: // CHECK21-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 // CHECK21-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK21-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: [[TMP13:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: [[CONV5:%.*]] = sitofp i32 [[TMP13]] to double // CHECK21-NEXT: [[ADD:%.*]] = fadd double [[CONV5]], 1.500000e+00 // CHECK21-NEXT: [[A:%.*]] 
= getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK21-NEXT: store double [[ADD]], double* [[A]], align 8, !nontemporal !27, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK21-NEXT: [[TMP14:%.*]] = load double, double* [[A6]], align 8, !nontemporal !27, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 // CHECK21-NEXT: store double [[INC]], double* [[A6]], align 8, !nontemporal !27, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: [[CONV7:%.*]] = fptosi double [[INC]] to i16 // CHECK21-NEXT: [[TMP15:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP15]] // CHECK21-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 // CHECK21-NEXT: store i16 [[CONV7]], i16* [[ARRAYIDX8]], align 2, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: // CHECK21-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: [[ADD9:%.*]] = add i64 [[TMP16]], 1 // CHECK21-NEXT: store i64 [[ADD9]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP26]] // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_IF_END:%.*]] // CHECK21: omp_if.else: // CHECK21-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4 // CHECK21-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK21-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: [[CMP10:%.*]] = icmp ugt i64 [[TMP19]], 3 // CHECK21-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]] // CHECK21: cond.true11: // CHECK21-NEXT: br label [[COND_END13:%.*]] // CHECK21: cond.false12: // CHECK21-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: br label [[COND_END13]] // CHECK21: cond.end13: // CHECK21-NEXT: [[COND14:%.*]] = phi i64 [ 3, [[COND_TRUE11]] ], [ [[TMP20]], [[COND_FALSE12]] ] // CHECK21-NEXT: store i64 [[COND14]], i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK21-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND15:%.*]] // CHECK21: omp.inner.for.cond15: // CHECK21-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK21-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: [[CMP16:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK21-NEXT: br i1 [[CMP16]], label [[OMP_INNER_FOR_BODY17:%.*]], label [[OMP_INNER_FOR_END31:%.*]] // CHECK21: omp.inner.for.body17: // CHECK21-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK21-NEXT: [[MUL18:%.*]] = mul i64 [[TMP24]], 400 // CHECK21-NEXT: [[SUB19:%.*]] = sub i64 2000, [[MUL18]] // CHECK21-NEXT: store i64 [[SUB19]], i64* [[IT]], align 8 // CHECK21-NEXT: [[TMP25:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK21-NEXT: [[CONV20:%.*]] = sitofp i32 [[TMP25]] to double // CHECK21-NEXT: 
[[ADD21:%.*]] = fadd double [[CONV20]], 1.500000e+00 // CHECK21-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK21-NEXT: store double [[ADD21]], double* [[A22]], align 8 // CHECK21-NEXT: [[A23:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK21-NEXT: [[TMP26:%.*]] = load double, double* [[A23]], align 8 // CHECK21-NEXT: [[INC24:%.*]] = fadd double [[TMP26]], 1.000000e+00 // CHECK21-NEXT: store double [[INC24]], double* [[A23]], align 8 // CHECK21-NEXT: [[CONV25:%.*]] = fptosi double [[INC24]] to i16 // CHECK21-NEXT: [[TMP27:%.*]] = mul nsw i64 1, [[TMP2]] // CHECK21-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP27]] // CHECK21-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX26]], i64 1 // CHECK21-NEXT: store i16 [[CONV25]], i16* [[ARRAYIDX27]], align 2 // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE28:%.*]] // CHECK21: omp.body.continue28: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC29:%.*]] // CHECK21: omp.inner.for.inc29: // CHECK21-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK21-NEXT: [[ADD30:%.*]] = add i64 [[TMP28]], 1 // CHECK21-NEXT: store i64 [[ADD30]], i64* [[DOTOMP_IV]], align 8 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND15]], !llvm.loop [[LOOP30:![0-9]+]] // CHECK21: omp.inner.for.end31: // CHECK21-NEXT: br label [[OMP_IF_END]] // CHECK21: omp_if.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 // CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) // CHECK21-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK21-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: // CHECK21-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK21: .omp.final.done: // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 // CHECK21-SAME: (i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK21-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK21-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK21-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 // CHECK21-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 4 // CHECK21-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* // CHECK21-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 // CHECK21-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 // CHECK21-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 2 // CHECK21-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to 
i16* // CHECK21-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 // CHECK21-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK21-NEXT: ret void // // // CHECK21-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK21-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { // CHECK21-NEXT: entry: // CHECK21-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 // CHECK21-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK21-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[TMP:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK21-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK21-NEXT: [[I:%.*]] = alloca i64, align 8 // CHECK21-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK21-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 // CHECK21-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 // CHECK21-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 // CHECK21-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* // CHECK21-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* // CHECK21-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 // CHECK21-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK21-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK21-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 // CHECK21-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 // CHECK21-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK21-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK21-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK21: cond.true: // CHECK21-NEXT: br label [[COND_END:%.*]] // CHECK21: cond.false: // CHECK21-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: br label [[COND_END]] // CHECK21: cond.end: // CHECK21-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK21-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK21-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK21-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK21: omp.inner.for.cond: // CHECK21-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group 
[[ACC_GRP32:![0-9]+]] // CHECK21-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK21-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK21: omp.inner.for.body: // CHECK21-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK21-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK21-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK21-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 // CHECK21-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 // CHECK21-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 // CHECK21-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 2, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 // CHECK21-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK21-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK21: omp.body.continue: // CHECK21-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK21: omp.inner.for.inc: // CHECK21-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK21-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP32]] // CHECK21-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] // CHECK21: omp.inner.for.end: // CHECK21-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK21: omp.loop.exit: // CHECK21-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) // CHECK21-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK21-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK21-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK21: .omp.final.then: // CHECK21-NEXT: store i64 11, i64* [[I]], align 8 // CHECK21-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK21: .omp.final.done: // CHECK21-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 // CHECK23-SAME: () #[[ATTR0:[0-9]+]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined. 
// CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[I:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK23-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: // CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12:![0-9]+]] // CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: // CHECK23-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] // CHECK23-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: // CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK23-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP12]] // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK23-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK23-NEXT: br i1 [[TMP10]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: // CHECK23-NEXT: store i32 33, i32* [[I]], align 4 // CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK23: .omp.final.done: // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l108 // CHECK23-SAME: (i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK23-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK23-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK23-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK23-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 // CHECK23-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4 // CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..1 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[LIN:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK23-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[LIN2:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[A3:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK23-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 // CHECK23-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4 // CHECK23-NEXT: [[CALL:%.*]] = call noundef i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] // CHECK23-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 // CHECK23-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK23-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 // CHECK23-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) // CHECK23-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK23-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: // CHECK23-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] 
] // CHECK23-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK23-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: // CHECK23-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18:![0-9]+]] // CHECK23-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] // CHECK23-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: // CHECK23-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 // CHECK23-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK23-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64 // CHECK23-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]] // CHECK23-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]] // CHECK23-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32 // CHECK23-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64 // CHECK23-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]] // CHECK23-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]] // CHECK23-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 // CHECK23-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32 // CHECK23-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 // CHECK23-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16 // CHECK23-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: // CHECK23-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1 // CHECK23-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP18]] // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) // CHECK23-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 // CHECK23-NEXT: br 
i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: // CHECK23-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK23: .omp.final.done: // CHECK23-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0 // CHECK23-NEXT: br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] // CHECK23: .omp.linear.pu: // CHECK23-NEXT: [[TMP22:%.*]] = load i32, i32* [[LIN2]], align 4 // CHECK23-NEXT: store i32 [[TMP22]], i32* [[LIN_ADDR]], align 4 // CHECK23-NEXT: [[TMP23:%.*]] = load i32, i32* [[A3]], align 4 // CHECK23-NEXT: store i32 [[TMP23]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] // CHECK23: .omp.linear.pu.done: // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@_Z7get_valv // CHECK23-SAME: () #[[ATTR3:[0-9]+]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: ret i64 0 // // // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l116 // CHECK23-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR0]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK23-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK23-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK23-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..2 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]]) #[[ATTR1]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[TMP:%.*]] = alloca i16, align 2 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[IT:%.*]] = alloca i16, align 2 // CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 // CHECK23-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: // CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21:![0-9]+]] // CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: // CHECK23-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] // CHECK23-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16 // 
CHECK23-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 // CHECK23-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32 // CHECK23-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 // CHECK23-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 // CHECK23-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: // CHECK23-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1 // CHECK23-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP21]] // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) // CHECK23-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 // CHECK23-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: // CHECK23-NEXT: store i16 22, i16* [[IT]], align 2 // CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK23: .omp.final.done: // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140 // CHECK23-SAME: (i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 // CHECK23-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 // CHECK23-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 // CHECK23-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 // CHECK23-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 // CHECK23-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 // CHECK23-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK23-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 // CHECK23-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 
4 // CHECK23-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK23-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 // CHECK23-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 // CHECK23-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 // CHECK23-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 // CHECK23-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 // CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 // CHECK23-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK23-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK23-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK23-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..3 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], [10 x float]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[VLA:%.*]], float* noundef nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[VLA1:%.*]], i32 noundef [[VLA3:%.*]], double* noundef nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* noundef nonnull align 4 dereferenceable(12) [[D:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 // CHECK23-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 // CHECK23-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 // CHECK23-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 // CHECK23-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 // CHECK23-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[TMP:%.*]] = alloca i8, align 1 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, 
align 4 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[IT:%.*]] = alloca i8, align 1 // CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 // CHECK23-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK23-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 // CHECK23-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK23-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK23-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 // CHECK23-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 // CHECK23-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 // CHECK23-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 // CHECK23-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK23-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 // CHECK23-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) // CHECK23-NEXT: br label [[OMP_DISPATCH_COND:%.*]] // CHECK23: omp.dispatch.cond: // CHECK23-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: // CHECK23-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] // CHECK23-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK23-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 // CHECK23-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 // CHECK23-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] // CHECK23-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label 
[[OMP_DISPATCH_END:%.*]] // CHECK23: omp.dispatch.body: // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: // CHECK23-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24:![0-9]+]] // CHECK23-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] // CHECK23-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: // CHECK23-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 // CHECK23-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] // CHECK23-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 // CHECK23-NEXT: store i8 [[CONV]], i8* [[IT]], align 1, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 // CHECK23-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2 // CHECK23-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double // CHECK23-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 // CHECK23-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float // CHECK23-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 // CHECK23-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double // CHECK23-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 // CHECK23-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float // CHECK23-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 // CHECK23-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 // CHECK23-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 // CHECK23-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] // CHECK23-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] // CHECK23-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 // CHECK23-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 // CHECK23-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 // CHECK23-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 // 
CHECK23-NEXT: store i64 [[ADD20]], i64* [[X]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 // CHECK23-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 // CHECK23-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 // CHECK23-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 // CHECK23-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: // CHECK23-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 // CHECK23-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP24]] // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_DISPATCH_INC:%.*]] // CHECK23: omp.dispatch.inc: // CHECK23-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 // CHECK23-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] // CHECK23-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 // CHECK23-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 // CHECK23-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] // CHECK23-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 // CHECK23-NEXT: br label [[OMP_DISPATCH_COND]] // CHECK23: omp.dispatch.end: // CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) // CHECK23-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK23-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: // CHECK23-NEXT: store i8 96, i8* [[IT]], align 1 // CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK23: .omp.final.done: // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195 // CHECK23-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK23-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 // CHECK23-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK23-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* // CHECK23-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // 
CHECK23-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK23-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK23-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK23-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 1 // CHECK23-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK23-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 // CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 // CHECK23-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK23-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* // CHECK23-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214 // CHECK23-SAME: (%struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 // CHECK23-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 // CHECK23-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* 
@[[GLOB2]]) // CHECK23-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK23-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 // CHECK23-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK23-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK23-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load i16*, i16** [[C_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8* // CHECK23-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK23-NEXT: store i32 [[TMP5]], i32* [[B_CASTED]], align 4 // CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[B_CASTED]], align 4 // CHECK23-NEXT: [[TMP7:%.*]] = load i8, i8* [[CONV]], align 1 // CHECK23-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP7]] to i1 // CHECK23-NEXT: [[CONV3:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__CASTED]] to i8* // CHECK23-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK23-NEXT: store i8 [[FROMBOOL]], i8* [[CONV3]], align 1 // CHECK23-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK23-NEXT: [[TMP9:%.*]] = load i8, i8* [[CONV]], align 1 // CHECK23-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK23-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK23: omp_if.then: // CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) // CHECK23-NEXT: br label [[OMP_IF_END:%.*]] // CHECK23: omp_if.else: // CHECK23-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK23-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK23-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 // CHECK23-NEXT: call void @.omp_outlined..5(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR2:[0-9]+]] // CHECK23-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK23-NEXT: br label [[OMP_IF_END]] // CHECK23: omp_if.end: // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..5 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 // CHECK23-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 
4 // CHECK23-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[IT:%.*]] = alloca i64, align 8 // CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK23-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK23-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 // CHECK23-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK23-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 // CHECK23-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 // CHECK23-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK23-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[DOTCAPTURE_EXPR__ADDR]] to i8* // CHECK23-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK23-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP4:%.*]] = load i8, i8* [[CONV]], align 1 // CHECK23-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP4]] to i1 // CHECK23-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK23: omp_if.then: // CHECK23-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 // CHECK23-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP6]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK23-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP7]], 3 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: // CHECK23-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP8]], [[COND_FALSE]] ] // CHECK23-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK23-NEXT: store i64 [[TMP9]], i64* [[DOTOMP_IV]], align 8 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: // CHECK23-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27:![0-9]+]] // CHECK23-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP10]], [[TMP11]] // CHECK23-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: // CHECK23-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, 
!llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: [[MUL:%.*]] = mul i64 [[TMP12]], 400 // CHECK23-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] // CHECK23-NEXT: store i64 [[SUB]], i64* [[IT]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: [[TMP13:%.*]] = load i32, i32* [[B_ADDR]], align 4, !llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP13]] to double // CHECK23-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 // CHECK23-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK23-NEXT: store double [[ADD]], double* [[A]], align 4, !nontemporal !28, !llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK23-NEXT: [[TMP14:%.*]] = load double, double* [[A5]], align 4, !nontemporal !28, !llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: [[INC:%.*]] = fadd double [[TMP14]], 1.000000e+00 // CHECK23-NEXT: store double [[INC]], double* [[A5]], align 4, !nontemporal !28, !llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 // CHECK23-NEXT: [[TMP15:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP15]] // CHECK23-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 // CHECK23-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2, !llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: // CHECK23-NEXT: [[TMP16:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: [[ADD8:%.*]] = add i64 [[TMP16]], 1 // CHECK23-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP27]] // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_IF_END:%.*]] // CHECK23: omp_if.else: // CHECK23-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 4 // CHECK23-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP18]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK23-NEXT: [[TMP19:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: [[CMP9:%.*]] = icmp ugt i64 [[TMP19]], 3 // CHECK23-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]] // CHECK23: cond.true10: // CHECK23-NEXT: br label [[COND_END12:%.*]] // CHECK23: cond.false11: // CHECK23-NEXT: [[TMP20:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: br label [[COND_END12]] // CHECK23: cond.end12: // CHECK23-NEXT: [[COND13:%.*]] = phi i64 [ 3, [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ] // CHECK23-NEXT: store i64 [[COND13]], i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK23-NEXT: store i64 [[TMP21]], i64* [[DOTOMP_IV]], align 8 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND14:%.*]] // CHECK23: omp.inner.for.cond14: // CHECK23-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK23-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: [[CMP15:%.*]] = icmp ule i64 [[TMP22]], [[TMP23]] // CHECK23-NEXT: br i1 [[CMP15]], label 
[[OMP_INNER_FOR_BODY16:%.*]], label [[OMP_INNER_FOR_END30:%.*]] // CHECK23: omp.inner.for.body16: // CHECK23-NEXT: [[TMP24:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK23-NEXT: [[MUL17:%.*]] = mul i64 [[TMP24]], 400 // CHECK23-NEXT: [[SUB18:%.*]] = sub i64 2000, [[MUL17]] // CHECK23-NEXT: store i64 [[SUB18]], i64* [[IT]], align 8 // CHECK23-NEXT: [[TMP25:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK23-NEXT: [[CONV19:%.*]] = sitofp i32 [[TMP25]] to double // CHECK23-NEXT: [[ADD20:%.*]] = fadd double [[CONV19]], 1.500000e+00 // CHECK23-NEXT: [[A21:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK23-NEXT: store double [[ADD20]], double* [[A21]], align 4 // CHECK23-NEXT: [[A22:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 // CHECK23-NEXT: [[TMP26:%.*]] = load double, double* [[A22]], align 4 // CHECK23-NEXT: [[INC23:%.*]] = fadd double [[TMP26]], 1.000000e+00 // CHECK23-NEXT: store double [[INC23]], double* [[A22]], align 4 // CHECK23-NEXT: [[CONV24:%.*]] = fptosi double [[INC23]] to i16 // CHECK23-NEXT: [[TMP27:%.*]] = mul nsw i32 1, [[TMP2]] // CHECK23-NEXT: [[ARRAYIDX25:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP27]] // CHECK23-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX25]], i32 1 // CHECK23-NEXT: store i16 [[CONV24]], i16* [[ARRAYIDX26]], align 2 // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] // CHECK23: omp.body.continue27: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] // CHECK23: omp.inner.for.inc28: // CHECK23-NEXT: [[TMP28:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 // CHECK23-NEXT: [[ADD29:%.*]] = add i64 [[TMP28]], 1 // CHECK23-NEXT: store i64 [[ADD29]], i64* [[DOTOMP_IV]], align 8 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND14]], !llvm.loop [[LOOP31:![0-9]+]] // CHECK23: omp.inner.for.end30: // CHECK23-NEXT: br label [[OMP_IF_END]] // CHECK23: omp_if.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 // CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) // CHECK23-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 // CHECK23-NEXT: br i1 [[TMP32]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: // CHECK23-NEXT: store i64 400, i64* [[IT]], align 8 // CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK23: .omp.final.done: // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178 // CHECK23-SAME: (i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK23-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK23-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK23-NEXT: [[TMP0:%.*]] = load [10 x i32]*, 
[10 x i32]** [[B_ADDR]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 // CHECK23-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 2 // CHECK23-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK23-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK23-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 // CHECK23-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK23-NEXT: ret void // // // CHECK23-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK23-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { // CHECK23-NEXT: entry: // CHECK23-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 // CHECK23-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK23-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[TMP:%.*]] = alloca i64, align 4 // CHECK23-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 // CHECK23-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 // CHECK23-NEXT: [[I:%.*]] = alloca i64, align 8 // CHECK23-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK23-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 // CHECK23-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 // CHECK23-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 // CHECK23-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* // CHECK23-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 // CHECK23-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 // CHECK23-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 // CHECK23-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK23-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 // CHECK23-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) // CHECK23-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 // CHECK23-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] // CHECK23: cond.true: // CHECK23-NEXT: br label [[COND_END:%.*]] // CHECK23: cond.false: // CHECK23-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 // CHECK23-NEXT: br label [[COND_END]] // CHECK23: cond.end: // CHECK23-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] // CHECK23-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 // 
CHECK23-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 // CHECK23-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] // CHECK23: omp.inner.for.cond: // CHECK23-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33:![0-9]+]] // CHECK23-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] // CHECK23-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] // CHECK23: omp.inner.for.body: // CHECK23-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 // CHECK23-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] // CHECK23-NEXT: store i64 [[ADD]], i64* [[I]], align 8, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 // CHECK23-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 // CHECK23-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 // CHECK23-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 // CHECK23-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 2, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 // CHECK23-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 // CHECK23-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] // CHECK23: omp.body.continue: // CHECK23-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK23: omp.inner.for.inc: // CHECK23-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 // CHECK23-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8, !llvm.access.group [[ACC_GRP33]] // CHECK23-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] // CHECK23: omp.inner.for.end: // CHECK23-NEXT: br label [[OMP_LOOP_EXIT:%.*]] // CHECK23: omp.loop.exit: // CHECK23-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) // CHECK23-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 // CHECK23-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 // CHECK23-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] // CHECK23: .omp.final.then: // CHECK23-NEXT: store i64 11, i64* [[I]], align 8 // CHECK23-NEXT: br label [[DOTOMP_FINAL_DONE]] // CHECK23: .omp.final.done: // CHECK23-NEXT: ret void //