// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
//RUN: %clang_cc1 -disable-noundef-analysis -triple x86_64-unknown-linux-gnu -fopenmp -DNORM \
//RUN:   -emit-llvm -o - %s | FileCheck %s --check-prefix NORM

//RUN: %clang_cc1 -disable-noundef-analysis -triple x86_64-unknown-linux-gnu -fopenmp -DCOMP \
//RUN:   -emit-llvm -o - %s | FileCheck %s --check-prefix COMP

// Prefer compound operators since that is what the spec seems to say.
//RUN: %clang_cc1 -disable-noundef-analysis -triple x86_64-unknown-linux-gnu -fopenmp -DNORM -DCOMP \
//RUN:   -emit-llvm -o - %s | FileCheck %s --check-prefix COMP

//RUN: %clang_cc1 -disable-noundef-analysis -triple x86_64-unknown-linux-gnu -fopenmp-simd -DNORM \
//RUN:   -emit-llvm -o - %s | FileCheck %s --check-prefix SIMD-ONLY

//RUN: %clang_cc1 -disable-noundef-analysis -triple x86_64-unknown-linux-gnu -fopenmp-simd -DCOMP \
//RUN:   -emit-llvm -o - %s | FileCheck %s --check-prefix SIMD-ONLY

//RUN: %clang_cc1 -disable-noundef-analysis -triple x86_64-unknown-linux-gnu -fopenmp-simd -DNORM -DCOMP \
//RUN:   -emit-llvm -o - %s | FileCheck %s --check-prefix SIMD-ONLY

// SIMD-ONLY-NOT: {{__kmpc|__tgt}}

struct Point {
  int x = 0;
  int y = 0;
#if NORM
  Point operator+(Point const &other) const;
  Point operator-(Point const &other) const;
  Point operator*(Point const &other) const;
  Point operator&(Point const &other) const;
  Point operator|(Point const &other) const;
  Point operator^(Point const &other) const;
#endif
  Point operator&&(Point const &other) const;
  Point operator||(Point const &other) const;
  Point &operator=(Point const &other);
#if COMP
  Point &operator+=(Point const &other);
  Point &operator*=(Point const &other);
  Point &operator&=(Point const &other);
  Point &operator|=(Point const &other);
  Point &operator^=(Point const &other);
#endif
};

void work(Point &P, int N, Point const *Points);

void foo(int N, Point const *Points) {
  Point Red;
#pragma omp parallel for reduction(+: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);

#pragma omp parallel for reduction(-: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);

#pragma omp parallel for reduction(*: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);

#pragma omp parallel for reduction(&: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);

#pragma omp parallel for reduction(|: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);

#pragma omp parallel for reduction(^: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);

#pragma omp parallel for reduction(&&: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);

#pragma omp parallel for reduction(||: Red)
  for (unsigned I = 0; I < N; ++I)
    work(Red, I, Points);
}
// NORM-LABEL: define {{[^@]+}}@_Z3fooiPK5Point
// NORM-SAME: (i32 [[N:%.*]], %struct.Point* [[POINTS:%.*]]) #[[ATTR0:[0-9]+]] {
// NORM-NEXT:  entry:
// NORM-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// NORM-NEXT:    [[POINTS_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT:    [[RED:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// NORM-NEXT:    store %struct.Point* [[POINTS]], %struct.Point** [[POINTS_ADDR]], align 8
// NORM-NEXT:    call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED]]) #[[ATTR4:[0-9]+]]
// NORM-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]])
// NORM-NEXT:    ret void
//
//
// NORM-LABEL: define {{[^@]+}}@_ZN5PointC1Ev
// NORM-SAME: (%struct.Point* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
// NORM-NEXT:  entry:
// NORM-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.Point*, align 8
// NORM-NEXT:    store %struct.Point* [[THIS]], %struct.Point** [[THIS_ADDR]], align 8
// NORM-NEXT:    [[THIS1:%.*]] = load %struct.Point*, %struct.Point** [[THIS_ADDR]], align 8
// NORM-NEXT:    call void @_ZN5PointC2Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]]
// NORM-NEXT:    ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined.
113 // NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2:[0-9]+]] { 114 // NORM-NEXT: entry: 115 // NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 116 // NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 117 // NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 118 // NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 119 // NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 120 // NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 121 // NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4 122 // NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 123 // NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 124 // NORM-NEXT: [[I:%.*]] = alloca i32, align 4 125 // NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 126 // NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 127 // NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 128 // NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 129 // NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 130 // NORM-NEXT: [[I4:%.*]] = alloca i32, align 4 131 // NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 132 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4 133 // NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4 134 // NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 135 // NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 136 // NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 137 // NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 138 // NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 139 // NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 140 // NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 141 // NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 142 // NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 143 // NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 144 // NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 145 // NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 146 // NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 147 // NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 148 // NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 149 // NORM-NEXT: store i32 0, i32* [[I]], align 4 150 // NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 151 // NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 152 // NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 153 // NORM: omp.precond.then: 154 // NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 155 // NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 156 // NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 157 // NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 158 // NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 159 // NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 160 // NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 161 // NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 
162 // NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 163 // NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 164 // NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 165 // NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 166 // NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 167 // NORM: cond.true: 168 // NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 169 // NORM-NEXT: br label [[COND_END:%.*]] 170 // NORM: cond.false: 171 // NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 172 // NORM-NEXT: br label [[COND_END]] 173 // NORM: cond.end: 174 // NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 175 // NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 176 // NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 177 // NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 178 // NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 179 // NORM: omp.inner.for.cond: 180 // NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 181 // NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 182 // NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 183 // NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 184 // NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 185 // NORM: omp.inner.for.body: 186 // NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 187 // NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 188 // NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 189 // NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 190 // NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 191 // NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 192 // NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 193 // NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 194 // NORM: omp.body.continue: 195 // NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 196 // NORM: omp.inner.for.inc: 197 // NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 198 // NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 199 // NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 200 // NORM-NEXT: br label [[OMP_INNER_FOR_COND]] 201 // NORM: omp.inner.for.end: 202 // NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 203 // NORM: omp.loop.exit: 204 // NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 205 // NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 206 // NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 207 // NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 208 // NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 209 // NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 210 // NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 211 // NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 212 // NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 213 // NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP25]], i32 1, i64 8, 
i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 214 // NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 215 // NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 216 // NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 217 // NORM-NEXT: ] 218 // NORM: .omp.reduction.case1: 219 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 220 // NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 221 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4 222 // NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 223 // NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 224 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 225 // NORM: .omp.reduction.case2: 226 // NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 227 // NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 228 // NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 229 // NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 230 // NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64* 231 // NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4 232 // NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]]) 233 // NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 234 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 235 // NORM: .omp.reduction.default: 236 // NORM-NEXT: br label [[OMP_PRECOND_END]] 237 // NORM: omp.precond.end: 238 // NORM-NEXT: ret void 239 // 240 // 241 // NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func 242 // NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] { 243 // NORM-NEXT: entry: 244 // NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 245 // NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 246 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 247 // NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 248 // NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 249 // NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 250 // NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 251 // NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 252 // NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 253 // NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 254 // NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 255 // NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 256 // NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 257 // NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 258 // NORM-NEXT: 
[[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 259 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 260 // NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 261 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4 262 // NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 263 // NORM-NEXT: ret void 264 // 265 // 266 // NORM-LABEL: define {{[^@]+}}@.omp_outlined..1 267 // NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 268 // NORM-NEXT: entry: 269 // NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 270 // NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 271 // NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 272 // NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 273 // NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 274 // NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 275 // NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4 276 // NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 277 // NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 278 // NORM-NEXT: [[I:%.*]] = alloca i32, align 4 279 // NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 280 // NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 281 // NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 282 // NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 283 // NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 284 // NORM-NEXT: [[I4:%.*]] = alloca i32, align 4 285 // NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 286 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4 287 // NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4 288 // NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 289 // NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 290 // NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 291 // NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 292 // NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 293 // NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 294 // NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 295 // NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 296 // NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 297 // NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 298 // NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 299 // NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 300 // NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 301 // NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 302 // NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 303 // NORM-NEXT: store i32 0, i32* [[I]], align 4 304 // NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 305 // NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 306 // NORM-NEXT: br i1 
[[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 307 // NORM: omp.precond.then: 308 // NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 309 // NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 310 // NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 311 // NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 312 // NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 313 // NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 314 // NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 315 // NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 316 // NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 317 // NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 318 // NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 319 // NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 320 // NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 321 // NORM: cond.true: 322 // NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 323 // NORM-NEXT: br label [[COND_END:%.*]] 324 // NORM: cond.false: 325 // NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 326 // NORM-NEXT: br label [[COND_END]] 327 // NORM: cond.end: 328 // NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 329 // NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 330 // NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 331 // NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 332 // NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 333 // NORM: omp.inner.for.cond: 334 // NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 335 // NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 336 // NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 337 // NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 338 // NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 339 // NORM: omp.inner.for.body: 340 // NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 341 // NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 342 // NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 343 // NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 344 // NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 345 // NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 346 // NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 347 // NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 348 // NORM: omp.body.continue: 349 // NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 350 // NORM: omp.inner.for.inc: 351 // NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 352 // NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 353 // NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 354 // NORM-NEXT: br label [[OMP_INNER_FOR_COND]] 355 // NORM: omp.inner.for.end: 356 // NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 357 // NORM: omp.loop.exit: 358 // NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 359 // NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 360 // NORM-NEXT: call void 
@__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 361 // NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 362 // NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 363 // NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 364 // NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 365 // NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 366 // NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 367 // NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var) 368 // NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 369 // NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 370 // NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 371 // NORM-NEXT: ] 372 // NORM: .omp.reduction.case1: 373 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 374 // NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 375 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4 376 // NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 377 // NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 378 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 379 // NORM: .omp.reduction.case2: 380 // NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 381 // NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 382 // NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 383 // NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 384 // NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64* 385 // NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4 386 // NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]]) 387 // NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 388 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 389 // NORM: .omp.reduction.default: 390 // NORM-NEXT: br label [[OMP_PRECOND_END]] 391 // NORM: omp.precond.end: 392 // NORM-NEXT: ret void 393 // 394 // 395 // NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2 396 // NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 397 // NORM-NEXT: entry: 398 // NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 399 // NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 400 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 401 // NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 402 // NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 403 // 
NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 404 // NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 405 // NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 406 // NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 407 // NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 408 // NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 409 // NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 410 // NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 411 // NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 412 // NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 413 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointplERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 414 // NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 415 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4 416 // NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 417 // NORM-NEXT: ret void 418 // 419 // 420 // NORM-LABEL: define {{[^@]+}}@.omp_outlined..3 421 // NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 422 // NORM-NEXT: entry: 423 // NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 424 // NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 425 // NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 426 // NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 427 // NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 428 // NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 429 // NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4 430 // NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 431 // NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 432 // NORM-NEXT: [[I:%.*]] = alloca i32, align 4 433 // NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 434 // NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 435 // NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 436 // NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 437 // NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 438 // NORM-NEXT: [[I4:%.*]] = alloca i32, align 4 439 // NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 440 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4 441 // NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4 442 // NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 443 // NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 444 // NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 445 // NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 446 // NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 447 // NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 448 // NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 449 // NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, 
%struct.Point*** [[POINTS_ADDR]], align 8 450 // NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 451 // NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 452 // NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 453 // NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 454 // NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 455 // NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 456 // NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 457 // NORM-NEXT: store i32 0, i32* [[I]], align 4 458 // NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 459 // NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 460 // NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 461 // NORM: omp.precond.then: 462 // NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 463 // NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 464 // NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 465 // NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 466 // NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 467 // NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 468 // NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 469 // NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 470 // NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 471 // NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 472 // NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 473 // NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 474 // NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 475 // NORM: cond.true: 476 // NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 477 // NORM-NEXT: br label [[COND_END:%.*]] 478 // NORM: cond.false: 479 // NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 480 // NORM-NEXT: br label [[COND_END]] 481 // NORM: cond.end: 482 // NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 483 // NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 484 // NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 485 // NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 486 // NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 487 // NORM: omp.inner.for.cond: 488 // NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 489 // NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 490 // NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 491 // NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 492 // NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 493 // NORM: omp.inner.for.body: 494 // NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 495 // NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 496 // NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 497 // NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 498 // NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 499 // NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 500 // NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 501 // 
NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 502 // NORM: omp.body.continue: 503 // NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 504 // NORM: omp.inner.for.inc: 505 // NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 506 // NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 507 // NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 508 // NORM-NEXT: br label [[OMP_INNER_FOR_COND]] 509 // NORM: omp.inner.for.end: 510 // NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 511 // NORM: omp.loop.exit: 512 // NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 513 // NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 514 // NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 515 // NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 516 // NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 517 // NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 518 // NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 519 // NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 520 // NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 521 // NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.4, [8 x i32]* @.gomp_critical_user_.reduction.var) 522 // NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 523 // NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 524 // NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 525 // NORM-NEXT: ] 526 // NORM: .omp.reduction.case1: 527 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointmlERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 528 // NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 529 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4 530 // NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 531 // NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 532 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 533 // NORM: .omp.reduction.case2: 534 // NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 535 // NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 536 // NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 537 // NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointmlERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 538 // NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64* 539 // NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4 540 // NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]]) 541 // NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 542 
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 543 // NORM: .omp.reduction.default: 544 // NORM-NEXT: br label [[OMP_PRECOND_END]] 545 // NORM: omp.precond.end: 546 // NORM-NEXT: ret void 547 // 548 // 549 // NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.4 550 // NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 551 // NORM-NEXT: entry: 552 // NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 553 // NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 554 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 555 // NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 556 // NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 557 // NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 558 // NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 559 // NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 560 // NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 561 // NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 562 // NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 563 // NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 564 // NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 565 // NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 566 // NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 567 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointmlERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 568 // NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 569 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4 570 // NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 571 // NORM-NEXT: ret void 572 // 573 // 574 // NORM-LABEL: define {{[^@]+}}@.omp_outlined..5 575 // NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 576 // NORM-NEXT: entry: 577 // NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 578 // NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 579 // NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 580 // NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 581 // NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 582 // NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 583 // NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4 584 // NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 585 // NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 586 // NORM-NEXT: [[I:%.*]] = alloca i32, align 4 587 // NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 588 // NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 589 // NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 590 // NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 591 // NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 592 // NORM-NEXT: [[I4:%.*]] = alloca i32, align 4 593 // NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 594 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4 595 // NORM-NEXT: 
[[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4 596 // NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 597 // NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 598 // NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 599 // NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 600 // NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 601 // NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 602 // NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 603 // NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 604 // NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 605 // NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 606 // NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 607 // NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 608 // NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 609 // NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 610 // NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 611 // NORM-NEXT: store i32 0, i32* [[I]], align 4 612 // NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 613 // NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 614 // NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 615 // NORM: omp.precond.then: 616 // NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 617 // NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 618 // NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 619 // NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 620 // NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 621 // NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 622 // NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 623 // NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 624 // NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 625 // NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 626 // NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 627 // NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 628 // NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 629 // NORM: cond.true: 630 // NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 631 // NORM-NEXT: br label [[COND_END:%.*]] 632 // NORM: cond.false: 633 // NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 634 // NORM-NEXT: br label [[COND_END]] 635 // NORM: cond.end: 636 // NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 637 // NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 638 // NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 639 // NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 640 // NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 641 // NORM: omp.inner.for.cond: 642 // NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 643 // NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 644 // NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 645 // NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 
646 // NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 647 // NORM: omp.inner.for.body: 648 // NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 649 // NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 650 // NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 651 // NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 652 // NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 653 // NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 654 // NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 655 // NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 656 // NORM: omp.body.continue: 657 // NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 658 // NORM: omp.inner.for.inc: 659 // NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 660 // NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 661 // NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 662 // NORM-NEXT: br label [[OMP_INNER_FOR_COND]] 663 // NORM: omp.inner.for.end: 664 // NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 665 // NORM: omp.loop.exit: 666 // NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 667 // NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 668 // NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 669 // NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 670 // NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 671 // NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 672 // NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 673 // NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 674 // NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 675 // NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var) 676 // NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 677 // NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 678 // NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 679 // NORM-NEXT: ] 680 // NORM: .omp.reduction.case1: 681 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointanERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 682 // NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 683 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4 684 // NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 685 // NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 686 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 687 // NORM: .omp.reduction.case2: 688 // NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 689 // NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 690 // NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 691 // NORM-NEXT: 
[[CALL11:%.*]] = call i64 @_ZNK5PointanERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 692 // NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64* 693 // NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4 694 // NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]]) 695 // NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 696 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 697 // NORM: .omp.reduction.default: 698 // NORM-NEXT: br label [[OMP_PRECOND_END]] 699 // NORM: omp.precond.end: 700 // NORM-NEXT: ret void 701 // 702 // 703 // NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6 704 // NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 705 // NORM-NEXT: entry: 706 // NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 707 // NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 708 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 709 // NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 710 // NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 711 // NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 712 // NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 713 // NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 714 // NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 715 // NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 716 // NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 717 // NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 718 // NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 719 // NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 720 // NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 721 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointanERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 722 // NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 723 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4 724 // NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 725 // NORM-NEXT: ret void 726 // 727 // 728 // NORM-LABEL: define {{[^@]+}}@.omp_outlined..7 729 // NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 730 // NORM-NEXT: entry: 731 // NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 732 // NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 733 // NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 734 // NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 735 // NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 736 // NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 737 // NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4 738 // NORM-NEXT: 
[[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// NORM-NEXT: [[I:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: [[I4:%.*]] = alloca i32, align 4
// NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 0, i32* [[I]], align 4
// NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// NORM: omp.precond.then:
// NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// NORM: cond.true:
// NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// NORM-NEXT: br label [[COND_END:%.*]]
// NORM: cond.false:
// NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: br label [[COND_END]]
// NORM: cond.end:
// NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// NORM: omp.inner.for.cond:
// NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// NORM: omp.inner.for.body:
// NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// NORM: omp.body.continue:
// NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// NORM: omp.inner.for.inc:
// NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// NORM-NEXT: br label [[OMP_INNER_FOR_COND]]
// NORM: omp.inner.for.end:
// NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// NORM: omp.loop.exit:
// NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.8, [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// NORM-NEXT: ]
// NORM: .omp.reduction.case1:
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointorERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.case2:
// NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointorERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// NORM: .omp.reduction.default:
// NORM-NEXT: br label [[OMP_PRECOND_END]]
// NORM: omp.precond.end:
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.8
// NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// NORM-NEXT: entry:
// NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointorERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// NORM-NEXT: ret void
//
//
// NORM-LABEL: define {{[^@]+}}@.omp_outlined..9
// NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias
[[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 884 // NORM-NEXT: entry: 885 // NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 886 // NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 887 // NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 888 // NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 889 // NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 890 // NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 891 // NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4 892 // NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 893 // NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 894 // NORM-NEXT: [[I:%.*]] = alloca i32, align 4 895 // NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 896 // NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 897 // NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 898 // NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 899 // NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 900 // NORM-NEXT: [[I4:%.*]] = alloca i32, align 4 901 // NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 902 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4 903 // NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4 904 // NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 905 // NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 906 // NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 907 // NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 908 // NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 909 // NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 910 // NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 911 // NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 912 // NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 913 // NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 914 // NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 915 // NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 916 // NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 917 // NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 918 // NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 919 // NORM-NEXT: store i32 0, i32* [[I]], align 4 920 // NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 921 // NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 922 // NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 923 // NORM: omp.precond.then: 924 // NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 925 // NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 926 // NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 927 // NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 928 // NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 929 // NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 930 // NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 931 // NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 932 // NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* 
@[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 933 // NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 934 // NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 935 // NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 936 // NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 937 // NORM: cond.true: 938 // NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 939 // NORM-NEXT: br label [[COND_END:%.*]] 940 // NORM: cond.false: 941 // NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 942 // NORM-NEXT: br label [[COND_END]] 943 // NORM: cond.end: 944 // NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 945 // NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 946 // NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 947 // NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 948 // NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 949 // NORM: omp.inner.for.cond: 950 // NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 951 // NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 952 // NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 953 // NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 954 // NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 955 // NORM: omp.inner.for.body: 956 // NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 957 // NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 958 // NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 959 // NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 960 // NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 961 // NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 962 // NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 963 // NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 964 // NORM: omp.body.continue: 965 // NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 966 // NORM: omp.inner.for.inc: 967 // NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 968 // NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 969 // NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 970 // NORM-NEXT: br label [[OMP_INNER_FOR_COND]] 971 // NORM: omp.inner.for.end: 972 // NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 973 // NORM: omp.loop.exit: 974 // NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 975 // NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 976 // NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 977 // NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 978 // NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 979 // NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 980 // NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 981 // NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 982 // NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 983 // NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.10, [8 x i32]* 
@.gomp_critical_user_.reduction.var) 984 // NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 985 // NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 986 // NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 987 // NORM-NEXT: ] 988 // NORM: .omp.reduction.case1: 989 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointeoERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 990 // NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 991 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4 992 // NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 993 // NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 994 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 995 // NORM: .omp.reduction.case2: 996 // NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 997 // NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 998 // NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 999 // NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointeoERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1000 // NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64* 1001 // NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4 1002 // NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]]) 1003 // NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1004 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1005 // NORM: .omp.reduction.default: 1006 // NORM-NEXT: br label [[OMP_PRECOND_END]] 1007 // NORM: omp.precond.end: 1008 // NORM-NEXT: ret void 1009 // 1010 // 1011 // NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.10 1012 // NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 1013 // NORM-NEXT: entry: 1014 // NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1015 // NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 1016 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1017 // NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1018 // NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 1019 // NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 1020 // NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1021 // NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 1022 // NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1023 // NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 1024 // NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 1025 // NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 1026 // NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 1027 // NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 1028 // NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to 
%struct.Point* 1029 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointeoERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 1030 // NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 1031 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4 1032 // NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 1033 // NORM-NEXT: ret void 1034 // 1035 // 1036 // NORM-LABEL: define {{[^@]+}}@.omp_outlined..11 1037 // NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 1038 // NORM-NEXT: entry: 1039 // NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1040 // NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1041 // NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 1042 // NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 1043 // NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 1044 // NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1045 // NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4 1046 // NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1047 // NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1048 // NORM-NEXT: [[I:%.*]] = alloca i32, align 4 1049 // NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1050 // NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1051 // NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1052 // NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1053 // NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1054 // NORM-NEXT: [[I4:%.*]] = alloca i32, align 4 1055 // NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 1056 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4 1057 // NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4 1058 // NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1059 // NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1060 // NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 1061 // NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 1062 // NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 1063 // NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 1064 // NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 1065 // NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 1066 // NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 1067 // NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 1068 // NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1069 // NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 1070 // NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 1071 // NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 1072 // NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1073 // NORM-NEXT: store i32 0, i32* [[I]], align 4 1074 // NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1075 // NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 1076 // NORM-NEXT: 
br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1077 // NORM: omp.precond.then: 1078 // NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1079 // NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1080 // NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 1081 // NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1082 // NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1083 // NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 1084 // NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1085 // NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 1086 // NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1087 // NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1088 // NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1089 // NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 1090 // NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1091 // NORM: cond.true: 1092 // NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1093 // NORM-NEXT: br label [[COND_END:%.*]] 1094 // NORM: cond.false: 1095 // NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1096 // NORM-NEXT: br label [[COND_END]] 1097 // NORM: cond.end: 1098 // NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 1099 // NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1100 // NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1101 // NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 1102 // NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1103 // NORM: omp.inner.for.cond: 1104 // NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1105 // NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1106 // NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 1107 // NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 1108 // NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1109 // NORM: omp.inner.for.body: 1110 // NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1111 // NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 1112 // NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 1113 // NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 1114 // NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 1115 // NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 1116 // NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 1117 // NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1118 // NORM: omp.body.continue: 1119 // NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1120 // NORM: omp.inner.for.inc: 1121 // NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1122 // NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 1123 // NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 1124 // NORM-NEXT: br label [[OMP_INNER_FOR_COND]] 1125 // NORM: omp.inner.for.end: 1126 // NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1127 // NORM: omp.loop.exit: 1128 // NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1129 // NORM-NEXT: [[TMP21:%.*]] = load i32, i32* 
[[TMP20]], align 4 1130 // NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 1131 // NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 1132 // NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 1133 // NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 1134 // NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1135 // NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 1136 // NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1137 // NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.12, [8 x i32]* @.gomp_critical_user_.reduction.var) 1138 // NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1139 // NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1140 // NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1141 // NORM-NEXT: ] 1142 // NORM: .omp.reduction.case1: 1143 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1144 // NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 1145 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4 1146 // NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 1147 // NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1148 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1149 // NORM: .omp.reduction.case2: 1150 // NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1151 // NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 1152 // NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1153 // NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1154 // NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64* 1155 // NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4 1156 // NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]]) 1157 // NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1158 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1159 // NORM: .omp.reduction.default: 1160 // NORM-NEXT: br label [[OMP_PRECOND_END]] 1161 // NORM: omp.precond.end: 1162 // NORM-NEXT: ret void 1163 // 1164 // 1165 // NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.12 1166 // NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 1167 // NORM-NEXT: entry: 1168 // NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1169 // NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 1170 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1171 // NORM-NEXT: store i8* [[TMP0]], i8** 
[[DOTADDR]], align 8 1172 // NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 1173 // NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 1174 // NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1175 // NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 1176 // NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1177 // NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 1178 // NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 1179 // NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 1180 // NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 1181 // NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 1182 // NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 1183 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 1184 // NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 1185 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4 1186 // NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 1187 // NORM-NEXT: ret void 1188 // 1189 // 1190 // NORM-LABEL: define {{[^@]+}}@.omp_outlined..13 1191 // NORM-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 1192 // NORM-NEXT: entry: 1193 // NORM-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1194 // NORM-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1195 // NORM-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 1196 // NORM-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 1197 // NORM-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 1198 // NORM-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1199 // NORM-NEXT: [[TMP:%.*]] = alloca i32, align 4 1200 // NORM-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1201 // NORM-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1202 // NORM-NEXT: [[I:%.*]] = alloca i32, align 4 1203 // NORM-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1204 // NORM-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1205 // NORM-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1206 // NORM-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1207 // NORM-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1208 // NORM-NEXT: [[I4:%.*]] = alloca i32, align 4 1209 // NORM-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 1210 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4 1211 // NORM-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4 1212 // NORM-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1213 // NORM-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1214 // NORM-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 1215 // NORM-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 1216 // NORM-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 1217 // NORM-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 1218 // 
NORM-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 1219 // NORM-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 1220 // NORM-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 1221 // NORM-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 1222 // NORM-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1223 // NORM-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 1224 // NORM-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 1225 // NORM-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 1226 // NORM-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1227 // NORM-NEXT: store i32 0, i32* [[I]], align 4 1228 // NORM-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1229 // NORM-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 1230 // NORM-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1231 // NORM: omp.precond.then: 1232 // NORM-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1233 // NORM-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1234 // NORM-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 1235 // NORM-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1236 // NORM-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1237 // NORM-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 1238 // NORM-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1239 // NORM-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 1240 // NORM-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1241 // NORM-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1242 // NORM-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1243 // NORM-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 1244 // NORM-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1245 // NORM: cond.true: 1246 // NORM-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1247 // NORM-NEXT: br label [[COND_END:%.*]] 1248 // NORM: cond.false: 1249 // NORM-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1250 // NORM-NEXT: br label [[COND_END]] 1251 // NORM: cond.end: 1252 // NORM-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 1253 // NORM-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1254 // NORM-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1255 // NORM-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 1256 // NORM-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1257 // NORM: omp.inner.for.cond: 1258 // NORM-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1259 // NORM-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1260 // NORM-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 1261 // NORM-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 1262 // NORM-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1263 // NORM: omp.inner.for.body: 1264 // NORM-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1265 // NORM-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 1266 // NORM-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 1267 // NORM-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 1268 // NORM-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 1269 // NORM-NEXT: [[TMP18:%.*]] = load %struct.Point*, 
%struct.Point** [[TMP2]], align 8 1270 // NORM-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 1271 // NORM-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1272 // NORM: omp.body.continue: 1273 // NORM-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1274 // NORM: omp.inner.for.inc: 1275 // NORM-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1276 // NORM-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 1277 // NORM-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 1278 // NORM-NEXT: br label [[OMP_INNER_FOR_COND]] 1279 // NORM: omp.inner.for.end: 1280 // NORM-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1281 // NORM: omp.loop.exit: 1282 // NORM-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1283 // NORM-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 1284 // NORM-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 1285 // NORM-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 1286 // NORM-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 1287 // NORM-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 1288 // NORM-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1289 // NORM-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 1290 // NORM-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1291 // NORM-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.14, [8 x i32]* @.gomp_critical_user_.reduction.var) 1292 // NORM-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1293 // NORM-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1294 // NORM-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1295 // NORM-NEXT: ] 1296 // NORM: .omp.reduction.case1: 1297 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1298 // NORM-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 1299 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4 1300 // NORM-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 1301 // NORM-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1302 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1303 // NORM: .omp.reduction.case2: 1304 // NORM-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1305 // NORM-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 1306 // NORM-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1307 // NORM-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1308 // NORM-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64* 1309 // NORM-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4 1310 // NORM-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 
dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]]) 1311 // NORM-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1312 // NORM-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1313 // NORM: .omp.reduction.default: 1314 // NORM-NEXT: br label [[OMP_PRECOND_END]] 1315 // NORM: omp.precond.end: 1316 // NORM-NEXT: ret void 1317 // 1318 // 1319 // NORM-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.14 1320 // NORM-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 1321 // NORM-NEXT: entry: 1322 // NORM-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1323 // NORM-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 1324 // NORM-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1325 // NORM-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1326 // NORM-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 1327 // NORM-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 1328 // NORM-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1329 // NORM-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 1330 // NORM-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1331 // NORM-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 1332 // NORM-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 1333 // NORM-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 1334 // NORM-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 1335 // NORM-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 1336 // NORM-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 1337 // NORM-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 1338 // NORM-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 1339 // NORM-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4 1340 // NORM-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 1341 // NORM-NEXT: ret void 1342 // 1343 // 1344 // NORM-LABEL: define {{[^@]+}}@_ZN5PointC2Ev 1345 // NORM-SAME: (%struct.Point* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 { 1346 // NORM-NEXT: entry: 1347 // NORM-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.Point*, align 8 1348 // NORM-NEXT: store %struct.Point* [[THIS]], %struct.Point** [[THIS_ADDR]], align 8 1349 // NORM-NEXT: [[THIS1:%.*]] = load %struct.Point*, %struct.Point** [[THIS_ADDR]], align 8 1350 // NORM-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_POINT:%.*]], %struct.Point* [[THIS1]], i32 0, i32 0 1351 // NORM-NEXT: store i32 0, i32* [[X]], align 4 1352 // NORM-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_POINT]], %struct.Point* [[THIS1]], i32 0, i32 1 1353 // NORM-NEXT: store i32 0, i32* [[Y]], align 4 1354 // NORM-NEXT: ret void 1355 // 1356 // 1357 // COMP-LABEL: define {{[^@]+}}@_Z3fooiPK5Point 1358 // COMP-SAME: (i32 [[N:%.*]], %struct.Point* [[POINTS:%.*]]) #[[ATTR0:[0-9]+]] { 1359 // COMP-NEXT: entry: 1360 // COMP-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1361 // COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point*, align 8 1362 // COMP-NEXT: [[RED:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1363 // COMP-NEXT: store i32 [[N]], i32* 
[[N_ADDR]], align 4 1364 // COMP-NEXT: store %struct.Point* [[POINTS]], %struct.Point** [[POINTS_ADDR]], align 8 1365 // COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED]]) #[[ATTR4:[0-9]+]] 1366 // COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]]) 1367 // COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]]) 1368 // COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]]) 1369 // COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]]) 1370 // COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]]) 1371 // COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]]) 1372 // COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]]) 1373 // COMP-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, %struct.Point*, %struct.Point**)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], %struct.Point* [[RED]], %struct.Point** [[POINTS_ADDR]]) 1374 // COMP-NEXT: ret void 1375 // 1376 // 1377 // COMP-LABEL: define {{[^@]+}}@_ZN5PointC1Ev 1378 // COMP-SAME: (%struct.Point* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { 1379 // COMP-NEXT: entry: 1380 // COMP-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.Point*, align 8 1381 // COMP-NEXT: store %struct.Point* [[THIS]], %struct.Point** [[THIS_ADDR]], align 8 1382 // COMP-NEXT: [[THIS1:%.*]] = load %struct.Point*, %struct.Point** [[THIS_ADDR]], align 8 1383 // COMP-NEXT: call void @_ZN5PointC2Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]] 1384 // COMP-NEXT: ret void 1385 // 1386 // 1387 // COMP-LABEL: define {{[^@]+}}@.omp_outlined. 1388 // COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2:[0-9]+]] { 1389 // COMP-NEXT: entry: 1390 // COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1391 // COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1392 // COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 1393 // COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 1394 // COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 1395 // COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1396 // COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4 1397 // COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1398 // COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1399 // COMP-NEXT: [[I:%.*]] = alloca i32, align 4 1400 // COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1401 // COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1402 // COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1403 // COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1404 // COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1405 // COMP-NEXT: [[I4:%.*]] = alloca i32, align 4 1406 // COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 1407 // COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1408 // COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1409 // COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 1410 // COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 1411 // COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 1412 // COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 1413 // COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 1414 // COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 1415 // COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 1416 // COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 1417 // COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1418 // COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 1419 // COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 1420 // COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 1421 // COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1422 // COMP-NEXT: store i32 
0, i32* [[I]], align 4 1423 // COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1424 // COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 1425 // COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1426 // COMP: omp.precond.then: 1427 // COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1428 // COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1429 // COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 1430 // COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1431 // COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1432 // COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 1433 // COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1434 // COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 1435 // COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1436 // COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1437 // COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1438 // COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 1439 // COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1440 // COMP: cond.true: 1441 // COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1442 // COMP-NEXT: br label [[COND_END:%.*]] 1443 // COMP: cond.false: 1444 // COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1445 // COMP-NEXT: br label [[COND_END]] 1446 // COMP: cond.end: 1447 // COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 1448 // COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1449 // COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1450 // COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 1451 // COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1452 // COMP: omp.inner.for.cond: 1453 // COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1454 // COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1455 // COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 1456 // COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 1457 // COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1458 // COMP: omp.inner.for.body: 1459 // COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1460 // COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 1461 // COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 1462 // COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 1463 // COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 1464 // COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 1465 // COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 1466 // COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1467 // COMP: omp.body.continue: 1468 // COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1469 // COMP: omp.inner.for.inc: 1470 // COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1471 // COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 1472 // COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 1473 // COMP-NEXT: br label [[OMP_INNER_FOR_COND]] 1474 // COMP: omp.inner.for.end: 1475 // COMP-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] 1476 // COMP: omp.loop.exit: 1477 // COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1478 // COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 1479 // COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 1480 // COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 1481 // COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 1482 // COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 1483 // COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1484 // COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 1485 // COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1486 // COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 1487 // COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1488 // COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1489 // COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1490 // COMP-NEXT: ] 1491 // COMP: .omp.reduction.case1: 1492 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1493 // COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1494 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1495 // COMP: .omp.reduction.case2: 1496 // COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1497 // COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4 1498 // COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1499 // COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1500 // COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1501 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1502 // COMP: .omp.reduction.default: 1503 // COMP-NEXT: br label [[OMP_PRECOND_END]] 1504 // COMP: omp.precond.end: 1505 // COMP-NEXT: ret void 1506 // 1507 // 1508 // COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func 1509 // COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] { 1510 // COMP-NEXT: entry: 1511 // COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1512 // COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 1513 // COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1514 // COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 1515 // COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 1516 // COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1517 // COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 1518 // COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1519 // COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 1520 // COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 1521 // 
COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 1522 // COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 1523 // COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 1524 // COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 1525 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 1526 // COMP-NEXT: ret void 1527 // 1528 // 1529 // COMP-LABEL: define {{[^@]+}}@.omp_outlined..1 1530 // COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 1531 // COMP-NEXT: entry: 1532 // COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1533 // COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1534 // COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 1535 // COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 1536 // COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 1537 // COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1538 // COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4 1539 // COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1540 // COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1541 // COMP-NEXT: [[I:%.*]] = alloca i32, align 4 1542 // COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1543 // COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1544 // COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1545 // COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1546 // COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1547 // COMP-NEXT: [[I4:%.*]] = alloca i32, align 4 1548 // COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 1549 // COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1550 // COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1551 // COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 1552 // COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 1553 // COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 1554 // COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 1555 // COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 1556 // COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 1557 // COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 1558 // COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 1559 // COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1560 // COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 1561 // COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 1562 // COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 1563 // COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1564 // COMP-NEXT: store i32 0, i32* [[I]], align 4 1565 // COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1566 // COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 1567 // COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1568 // COMP: omp.precond.then: 1569 // COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1570 // 
COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointpLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined..3
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1770 // COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.4, [8 x i32]* @.gomp_critical_user_.reduction.var) 1771 // COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1772 // COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1773 // COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1774 // COMP-NEXT: ] 1775 // COMP: .omp.reduction.case1: 1776 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointmLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1777 // COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1778 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1779 // COMP: .omp.reduction.case2: 1780 // COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1781 // COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4 1782 // COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1783 // COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointmLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1784 // COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1785 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1786 // COMP: .omp.reduction.default: 1787 // COMP-NEXT: br label [[OMP_PRECOND_END]] 1788 // COMP: omp.precond.end: 1789 // COMP-NEXT: ret void 1790 // 1791 // 1792 // COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.4 1793 // COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 1794 // COMP-NEXT: entry: 1795 // COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1796 // COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 1797 // COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1798 // COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 1799 // COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 1800 // COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1801 // COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 1802 // COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1803 // COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 1804 // COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 1805 // COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 1806 // COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 1807 // COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 1808 // COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 1809 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointmLERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 1810 // COMP-NEXT: ret void 1811 // 1812 // 1813 // COMP-LABEL: define {{[^@]+}}@.omp_outlined..5 1814 // COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* 
nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 1815 // COMP-NEXT: entry: 1816 // COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1817 // COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1818 // COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 1819 // COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 1820 // COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 1821 // COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1822 // COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4 1823 // COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1824 // COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1825 // COMP-NEXT: [[I:%.*]] = alloca i32, align 4 1826 // COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1827 // COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1828 // COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1829 // COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1830 // COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1831 // COMP-NEXT: [[I4:%.*]] = alloca i32, align 4 1832 // COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 1833 // COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1834 // COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1835 // COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 1836 // COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 1837 // COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 1838 // COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 1839 // COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 1840 // COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 1841 // COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 1842 // COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 1843 // COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1844 // COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 1845 // COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 1846 // COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 1847 // COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1848 // COMP-NEXT: store i32 0, i32* [[I]], align 4 1849 // COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1850 // COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 1851 // COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1852 // COMP: omp.precond.then: 1853 // COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1854 // COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1855 // COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 1856 // COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1857 // COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1858 // COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 1859 // COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1860 // COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 1861 // COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 
1, i32 1) 1862 // COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1863 // COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1864 // COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 1865 // COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1866 // COMP: cond.true: 1867 // COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1868 // COMP-NEXT: br label [[COND_END:%.*]] 1869 // COMP: cond.false: 1870 // COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1871 // COMP-NEXT: br label [[COND_END]] 1872 // COMP: cond.end: 1873 // COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 1874 // COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1875 // COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1876 // COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 1877 // COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1878 // COMP: omp.inner.for.cond: 1879 // COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1880 // COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1881 // COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 1882 // COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 1883 // COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1884 // COMP: omp.inner.for.body: 1885 // COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1886 // COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 1887 // COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 1888 // COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 1889 // COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 1890 // COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 1891 // COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 1892 // COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1893 // COMP: omp.body.continue: 1894 // COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1895 // COMP: omp.inner.for.inc: 1896 // COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1897 // COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 1898 // COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 1899 // COMP-NEXT: br label [[OMP_INNER_FOR_COND]] 1900 // COMP: omp.inner.for.end: 1901 // COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1902 // COMP: omp.loop.exit: 1903 // COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1904 // COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 1905 // COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 1906 // COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 1907 // COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 1908 // COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 1909 // COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1910 // COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 1911 // COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1912 // COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var) 1913 // COMP-NEXT: switch i32 [[TMP27]], label 
[[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1914 // COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1915 // COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1916 // COMP-NEXT: ] 1917 // COMP: .omp.reduction.case1: 1918 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaNERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1919 // COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1920 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1921 // COMP: .omp.reduction.case2: 1922 // COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1923 // COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4 1924 // COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1925 // COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaNERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 1926 // COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 1927 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1928 // COMP: .omp.reduction.default: 1929 // COMP-NEXT: br label [[OMP_PRECOND_END]] 1930 // COMP: omp.precond.end: 1931 // COMP-NEXT: ret void 1932 // 1933 // 1934 // COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6 1935 // COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 1936 // COMP-NEXT: entry: 1937 // COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1938 // COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 1939 // COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1940 // COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 1941 // COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 1942 // COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1943 // COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 1944 // COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1945 // COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 1946 // COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 1947 // COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 1948 // COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 1949 // COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 1950 // COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 1951 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaNERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 1952 // COMP-NEXT: ret void 1953 // 1954 // 1955 // COMP-LABEL: define {{[^@]+}}@.omp_outlined..7 1956 // COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 1957 // COMP-NEXT: entry: 1958 // COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1959 // COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1960 // COMP-NEXT: 
[[N_ADDR:%.*]] = alloca i32*, align 8 1961 // COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 1962 // COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 1963 // COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1964 // COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4 1965 // COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1966 // COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1967 // COMP-NEXT: [[I:%.*]] = alloca i32, align 4 1968 // COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1969 // COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1970 // COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1971 // COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1972 // COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 1973 // COMP-NEXT: [[I4:%.*]] = alloca i32, align 4 1974 // COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 1975 // COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1976 // COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1977 // COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 1978 // COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 1979 // COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 1980 // COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 1981 // COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 1982 // COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 1983 // COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 1984 // COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 1985 // COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1986 // COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 1987 // COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 1988 // COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 1989 // COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1990 // COMP-NEXT: store i32 0, i32* [[I]], align 4 1991 // COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1992 // COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 1993 // COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1994 // COMP: omp.precond.then: 1995 // COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1996 // COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1997 // COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 1998 // COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1999 // COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2000 // COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 2001 // COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2002 // COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 2003 // COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2004 // COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2005 // COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2006 // COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 2007 // COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2008 // COMP: cond.true: 2009 // COMP-NEXT: [[TMP11:%.*]] 
= load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2010 // COMP-NEXT: br label [[COND_END:%.*]] 2011 // COMP: cond.false: 2012 // COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2013 // COMP-NEXT: br label [[COND_END]] 2014 // COMP: cond.end: 2015 // COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 2016 // COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2017 // COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2018 // COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 2019 // COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2020 // COMP: omp.inner.for.cond: 2021 // COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2022 // COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2023 // COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 2024 // COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 2025 // COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2026 // COMP: omp.inner.for.body: 2027 // COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2028 // COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 2029 // COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 2030 // COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 2031 // COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 2032 // COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 2033 // COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 2034 // COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2035 // COMP: omp.body.continue: 2036 // COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2037 // COMP: omp.inner.for.inc: 2038 // COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2039 // COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 2040 // COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 2041 // COMP-NEXT: br label [[OMP_INNER_FOR_COND]] 2042 // COMP: omp.inner.for.end: 2043 // COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2044 // COMP: omp.loop.exit: 2045 // COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2046 // COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 2047 // COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 2048 // COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 2049 // COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 2050 // COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 2051 // COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2052 // COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 2053 // COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2054 // COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.8, [8 x i32]* @.gomp_critical_user_.reduction.var) 2055 // COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2056 // COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 2057 // COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2058 // COMP-NEXT: ] 2059 // COMP: .omp.reduction.case1: 2060 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointoRERKS_(%struct.Point* nonnull align 4 dereferenceable(8) 
[[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 2061 // COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2062 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2063 // COMP: .omp.reduction.case2: 2064 // COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2065 // COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4 2066 // COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 2067 // COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointoRERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 2068 // COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 2069 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2070 // COMP: .omp.reduction.default: 2071 // COMP-NEXT: br label [[OMP_PRECOND_END]] 2072 // COMP: omp.precond.end: 2073 // COMP-NEXT: ret void 2074 // 2075 // 2076 // COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.8 2077 // COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 2078 // COMP-NEXT: entry: 2079 // COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 2080 // COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 2081 // COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 2082 // COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 2083 // COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 2084 // COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 2085 // COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 2086 // COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 2087 // COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 2088 // COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 2089 // COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 2090 // COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 2091 // COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 2092 // COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 2093 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointoRERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 2094 // COMP-NEXT: ret void 2095 // 2096 // 2097 // COMP-LABEL: define {{[^@]+}}@.omp_outlined..9 2098 // COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 2099 // COMP-NEXT: entry: 2100 // COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2101 // COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2102 // COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 2103 // COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 2104 // COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 2105 // COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2106 // COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4 2107 // COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2108 // COMP-NEXT: 
[[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2109 // COMP-NEXT: [[I:%.*]] = alloca i32, align 4 2110 // COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2111 // COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2112 // COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2113 // COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2114 // COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 2115 // COMP-NEXT: [[I4:%.*]] = alloca i32, align 4 2116 // COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 2117 // COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2118 // COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2119 // COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 2120 // COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 2121 // COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 2122 // COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 2123 // COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 2124 // COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 2125 // COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 2126 // COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 2127 // COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2128 // COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 2129 // COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 2130 // COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 2131 // COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2132 // COMP-NEXT: store i32 0, i32* [[I]], align 4 2133 // COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2134 // COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 2135 // COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 2136 // COMP: omp.precond.then: 2137 // COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2138 // COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2139 // COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 2140 // COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2141 // COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2142 // COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 2143 // COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2144 // COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 2145 // COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2146 // COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2147 // COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2148 // COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 2149 // COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2150 // COMP: cond.true: 2151 // COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2152 // COMP-NEXT: br label [[COND_END:%.*]] 2153 // COMP: cond.false: 2154 // COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2155 // COMP-NEXT: br label [[COND_END]] 2156 // COMP: cond.end: 2157 // COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 2158 // COMP-NEXT: 
store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2159 // COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2160 // COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 2161 // COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2162 // COMP: omp.inner.for.cond: 2163 // COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2164 // COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2165 // COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 2166 // COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 2167 // COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2168 // COMP: omp.inner.for.body: 2169 // COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2170 // COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 2171 // COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 2172 // COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 2173 // COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 2174 // COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 2175 // COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 2176 // COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2177 // COMP: omp.body.continue: 2178 // COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2179 // COMP: omp.inner.for.inc: 2180 // COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2181 // COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 2182 // COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 2183 // COMP-NEXT: br label [[OMP_INNER_FOR_COND]] 2184 // COMP: omp.inner.for.end: 2185 // COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2186 // COMP: omp.loop.exit: 2187 // COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2188 // COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 2189 // COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 2190 // COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 2191 // COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 2192 // COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 2193 // COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2194 // COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 2195 // COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2196 // COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.10, [8 x i32]* @.gomp_critical_user_.reduction.var) 2197 // COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2198 // COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 2199 // COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2200 // COMP-NEXT: ] 2201 // COMP: .omp.reduction.case1: 2202 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointeOERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 2203 // COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2204 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2205 // COMP: .omp.reduction.case2: 2206 // COMP-NEXT: [[TMP28:%.*]] = load i32*, i32** 
[[DOTGLOBAL_TID__ADDR]], align 8 2207 // COMP-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4 2208 // COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 2209 // COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointeOERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 2210 // COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP29]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 2211 // COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2212 // COMP: .omp.reduction.default: 2213 // COMP-NEXT: br label [[OMP_PRECOND_END]] 2214 // COMP: omp.precond.end: 2215 // COMP-NEXT: ret void 2216 // 2217 // 2218 // COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.10 2219 // COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] { 2220 // COMP-NEXT: entry: 2221 // COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 2222 // COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 2223 // COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 2224 // COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 2225 // COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 2226 // COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 2227 // COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 2228 // COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 2229 // COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 2230 // COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 2231 // COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point* 2232 // COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 2233 // COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 2234 // COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point* 2235 // COMP-NEXT: [[CALL:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointeOERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]]) 2236 // COMP-NEXT: ret void 2237 // 2238 // 2239 // COMP-LABEL: define {{[^@]+}}@.omp_outlined..11 2240 // COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] { 2241 // COMP-NEXT: entry: 2242 // COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2243 // COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2244 // COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 2245 // COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8 2246 // COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8 2247 // COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2248 // COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4 2249 // COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2250 // COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2251 // COMP-NEXT: [[I:%.*]] = alloca i32, align 4 2252 // COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2253 // COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2254 // COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2255 // COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2256 // 
COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4 2257 // COMP-NEXT: [[I4:%.*]] = alloca i32, align 4 2258 // COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 2259 // COMP-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4 2260 // COMP-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4 2261 // COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2262 // COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2263 // COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 2264 // COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8 2265 // COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8 2266 // COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 2267 // COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8 2268 // COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8 2269 // COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 2270 // COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 2271 // COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2272 // COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0 2273 // COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1 2274 // COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1 2275 // COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2276 // COMP-NEXT: store i32 0, i32* [[I]], align 4 2277 // COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2278 // COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]] 2279 // COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 2280 // COMP: omp.precond.then: 2281 // COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2282 // COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2283 // COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 2284 // COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2285 // COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2286 // COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]] 2287 // COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2288 // COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 2289 // COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2290 // COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2291 // COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2292 // COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]] 2293 // COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2294 // COMP: cond.true: 2295 // COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2296 // COMP-NEXT: br label [[COND_END:%.*]] 2297 // COMP: cond.false: 2298 // COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2299 // COMP-NEXT: br label [[COND_END]] 2300 // COMP: cond.end: 2301 // COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 2302 // COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2303 // COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2304 // COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 2305 // COMP-NEXT: 
br label [[OMP_INNER_FOR_COND:%.*]] 2306 // COMP: omp.inner.for.cond: 2307 // COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2308 // COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2309 // COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1 2310 // COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]] 2311 // COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2312 // COMP: omp.inner.for.body: 2313 // COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2314 // COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1 2315 // COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]] 2316 // COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4 2317 // COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4 2318 // COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8 2319 // COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]]) 2320 // COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2321 // COMP: omp.body.continue: 2322 // COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2323 // COMP: omp.inner.for.inc: 2324 // COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2325 // COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1 2326 // COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 2327 // COMP-NEXT: br label [[OMP_INNER_FOR_COND]] 2328 // COMP: omp.inner.for.end: 2329 // COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2330 // COMP: omp.loop.exit: 2331 // COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2332 // COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4 2333 // COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]]) 2334 // COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 2335 // COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8* 2336 // COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8 2337 // COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2338 // COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4 2339 // COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2340 // COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.12, [8 x i32]* @.gomp_critical_user_.reduction.var) 2341 // COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2342 // COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 2343 // COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2344 // COMP-NEXT: ] 2345 // COMP: .omp.reduction.case1: 2346 // COMP-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) 2347 // COMP-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64* 2348 // COMP-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4 2349 // COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]]) 2350 // COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2351 // COMP-NEXT: br label 
[[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// COMP-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// COMP-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.12
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointaaERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// COMP-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// COMP-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp_outlined..13
// COMP-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32* nonnull align 4 dereferenceable(4) [[N:%.*]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED:%.*]], %struct.Point** nonnull align 8 dereferenceable(8) [[POINTS:%.*]]) #[[ATTR2]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8
// COMP-NEXT: [[RED_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: [[POINTS_ADDR:%.*]] = alloca %struct.Point**, align 8
// COMP-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// COMP-NEXT: [[TMP:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// COMP-NEXT: [[I:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// COMP-NEXT: [[RED3:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: [[I4:%.*]] = alloca i32, align 4
// COMP-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// COMP-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT]], align 4
// COMP-NEXT: [[REF_TMP10:%.*]] = alloca [[STRUCT_POINT]], align 4
// COMP-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// COMP-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8
// COMP-NEXT: store %struct.Point* [[RED]], %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: store %struct.Point** [[POINTS]], %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8
// COMP-NEXT: [[TMP1:%.*]] = load %struct.Point*, %struct.Point** [[RED_ADDR]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load %struct.Point**, %struct.Point*** [[POINTS_ADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4
// COMP-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[SUB:%.*]] = sub i32 [[TMP4]], 0
// COMP-NEXT: [[DIV:%.*]] = udiv i32 [[SUB]], 1
// COMP-NEXT: [[SUB2:%.*]] = sub i32 [[DIV]], 1
// COMP-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 0, i32* [[I]], align 4
// COMP-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// COMP-NEXT: [[CMP:%.*]] = icmp ult i32 0, [[TMP5]]
// COMP-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// COMP: omp.precond.then:
// COMP-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// COMP-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// COMP-NEXT: call void @_ZN5PointC1Ev(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]]) #[[ATTR4]]
// COMP-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// COMP-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// COMP-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: [[CMP5:%.*]] = icmp ugt i32 [[TMP9]], [[TMP10]]
// COMP-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// COMP: cond.true:
// COMP-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// COMP-NEXT: br label [[COND_END:%.*]]
// COMP: cond.false:
// COMP-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: br label [[COND_END]]
// COMP: cond.end:
// COMP-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// COMP-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// COMP-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// COMP: omp.inner.for.cond:
// COMP-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// COMP-NEXT: [[ADD:%.*]] = add i32 [[TMP15]], 1
// COMP-NEXT: [[CMP6:%.*]] = icmp ult i32 [[TMP14]], [[ADD]]
// COMP-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// COMP: omp.inner.for.body:
// COMP-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[MUL:%.*]] = mul i32 [[TMP16]], 1
// COMP-NEXT: [[ADD7:%.*]] = add i32 0, [[MUL]]
// COMP-NEXT: store i32 [[ADD7]], i32* [[I4]], align 4
// COMP-NEXT: [[TMP17:%.*]] = load i32, i32* [[I4]], align 4
// COMP-NEXT: [[TMP18:%.*]] = load %struct.Point*, %struct.Point** [[TMP2]], align 8
// COMP-NEXT: call void @_Z4workR5PointiPKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[RED3]], i32 [[TMP17]], %struct.Point* [[TMP18]])
// COMP-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// COMP: omp.body.continue:
// COMP-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// COMP: omp.inner.for.inc:
// COMP-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: [[ADD8:%.*]] = add i32 [[TMP19]], 1
// COMP-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// COMP-NEXT: br label [[OMP_INNER_FOR_COND]]
// COMP: omp.inner.for.end:
// COMP-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// COMP: omp.loop.exit:
// COMP-NEXT: [[TMP20:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP20]], align 4
// COMP-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP21]])
// COMP-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// COMP-NEXT: [[TMP23:%.*]] = bitcast %struct.Point* [[RED3]] to i8*
// COMP-NEXT: store i8* [[TMP23]], i8** [[TMP22]], align 8
// COMP-NEXT: [[TMP24:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP25:%.*]] = load i32, i32* [[TMP24]], align 4
// COMP-NEXT: [[TMP26:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// COMP-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], i32 1, i64 8, i8* [[TMP26]], void (i8*, i8*)* @.omp.reduction.reduction_func.14, [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: switch i32 [[TMP27]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// COMP-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// COMP-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// COMP-NEXT: ]
// COMP: .omp.reduction.case1:
// COMP-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: [[TMP28:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// COMP-NEXT: store i64 [[CALL]], i64* [[TMP28]], align 4
// COMP-NEXT: [[CALL9:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// COMP-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB2]], i32 [[TMP25]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.case2:
// COMP-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// COMP-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4
// COMP-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: [[CALL11:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[RED3]])
// COMP-NEXT: [[TMP31:%.*]] = bitcast %struct.Point* [[REF_TMP10]] to i64*
// COMP-NEXT: store i64 [[CALL11]], i64* [[TMP31]], align 4
// COMP-NEXT: [[CALL12:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP1]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP10]])
// COMP-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB3]], i32 [[TMP30]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// COMP-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// COMP: .omp.reduction.default:
// COMP-NEXT: br label [[OMP_PRECOND_END]]
// COMP: omp.precond.end:
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.14
// COMP-SAME: (i8* [[TMP0:%.*]], i8* [[TMP1:%.*]]) #[[ATTR5]] {
// COMP-NEXT: entry:
// COMP-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// COMP-NEXT: [[REF_TMP:%.*]] = alloca [[STRUCT_POINT:%.*]], align 4
// COMP-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// COMP-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// COMP-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// COMP-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// COMP-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// COMP-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// COMP-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// COMP-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.Point*
// COMP-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// COMP-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// COMP-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %struct.Point*
// COMP-NEXT: [[CALL:%.*]] = call i64 @_ZNK5PointooERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[TMP8]])
// COMP-NEXT: [[TMP12:%.*]] = bitcast %struct.Point* [[REF_TMP]] to i64*
// COMP-NEXT: store i64 [[CALL]], i64* [[TMP12]], align 4
// COMP-NEXT: [[CALL2:%.*]] = call nonnull align 4 dereferenceable(8) %struct.Point* @_ZN5PointaSERKS_(%struct.Point* nonnull align 4 dereferenceable(8) [[TMP11]], %struct.Point* nonnull align 4 dereferenceable(8) [[REF_TMP]])
// COMP-NEXT: ret void
//
//
// COMP-LABEL: define {{[^@]+}}@_ZN5PointC2Ev
// COMP-SAME: (%struct.Point* nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// COMP-NEXT: entry:
// COMP-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.Point*, align 8
// COMP-NEXT: store %struct.Point* [[THIS]], %struct.Point** [[THIS_ADDR]], align 8
// COMP-NEXT: [[THIS1:%.*]] = load %struct.Point*, %struct.Point** [[THIS_ADDR]], align 8
// COMP-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_POINT:%.*]], %struct.Point* [[THIS1]], i32 0, i32 0
// COMP-NEXT: store i32 0, i32* [[X]], align 4
// COMP-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_POINT]], %struct.Point* [[THIS1]], i32 0, i32 1
// COMP-NEXT: store i32 0, i32* [[Y]], align 4
// COMP-NEXT: ret void
//