; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=ipsccp < %s | FileCheck %s
;
;    void bar(int, float, double);
;
;    void foo(int N) {
;      float p = 3;
;      double q = 5;
;      N = 7;
;
;    #pragma omp parallel for firstprivate(q)
;      for (int i = 2; i < N; i++) {
;        bar(i, p, q);
;      }
;    }
;
; Verify the constant value of q is propagated into the outlined function.
;
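; The i64 value 4617315517961601024 passed to @__kmpc_fork_call below is
; 0x4014000000000000, the IEEE-754 bit pattern of double 5.0: firstprivate(q)
; is lowered to passing q by value as an i64, which is what gives IPSCCP a
; constant to propagate.
;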
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

%struct.ident_t = type { i32, i32, i32, i32, i8* }

@.str = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
@0 = private unnamed_addr global %struct.ident_t { i32 0, i32 514, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 0) }, align 8
@1 = private unnamed_addr global %struct.ident_t { i32 0, i32 2, i32 0, i32 0, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @.str, i32 0, i32 0) }, align 8

define dso_local void @foo(i32 %N) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[P:%.*]] = alloca float, align 4
; CHECK-NEXT:    store i32 [[N:%.*]], i32* [[N_ADDR]], align 4
; CHECK-NEXT:    store float 3.000000e+00, float* [[P]], align 4
; CHECK-NEXT:    store i32 7, i32* [[N_ADDR]], align 4
; CHECK-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* nonnull @1, i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* nonnull [[N_ADDR]], float* nonnull [[P]], i64 4617315517961601024)
; CHECK-NEXT:    ret void
;
entry:
  %N.addr = alloca i32, align 4
  %p = alloca float, align 4
  store i32 %N, i32* %N.addr, align 4
  store float 3.000000e+00, float* %p, align 4
  store i32 7, i32* %N.addr, align 4
  call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* nonnull @1, i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, float*, i64)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* nonnull %N.addr, float* nonnull %p, i64 4617315517961601024)
  ret void
}
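
; The variadic operands of the @__kmpc_fork_call above (%N.addr, %p, and the
; i64-encoded q) are forwarded by the runtime to the trailing parameters of
; @.omp_outlined.; the !callback metadata on the declaration at the bottom of
; this file is what makes that hand-off visible to interprocedural passes.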

define internal void @.omp_outlined.(i32* noalias %.global_tid., i32* noalias %.bound_tid., i32* dereferenceable(4) %N, float* dereferenceable(4) %p, i64 %q) {
; CHECK-LABEL: @.omp_outlined.(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[Q_ADDR:%.*]] = alloca i64, align 8
; CHECK-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i64 [[Q:%.*]], i64* [[Q_ADDR]], align 8
; CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[Q_ADDR]] to double*
; CHECK-NEXT:    [[TMP:%.*]] = load i32, i32* [[N:%.*]], align 4
; CHECK-NEXT:    [[SUB3:%.*]] = add nsw i32 [[TMP]], -3
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP]], 2
; CHECK-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
; CHECK:       omp.precond.then:
; CHECK-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
; CHECK-NEXT:    store i32 [[SUB3]], i32* [[DOTOMP_UB]], align 4
; CHECK-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
; CHECK-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTGLOBAL_TID_:%.*]], align 4
; CHECK-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* nonnull @0, i32 [[TMP5]], i32 34, i32* nonnull [[DOTOMP_IS_LAST]], i32* nonnull [[DOTOMP_LB]], i32* nonnull [[DOTOMP_UB]], i32* nonnull [[DOTOMP_STRIDE]], i32 1, i32 1)
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
; CHECK-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP6]], [[SUB3]]
; CHECK-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
; CHECK:       cond.true:
; CHECK-NEXT:    br label [[COND_END:%.*]]
; CHECK:       cond.false:
; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
; CHECK-NEXT:    br label [[COND_END]]
; CHECK:       cond.end:
; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[SUB3]], [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
; CHECK-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
; CHECK-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
; CHECK:       omp.inner.for.cond:
; CHECK-NEXT:    [[DOTOMP_IV_0:%.*]] = phi i32 [ [[TMP8]], [[COND_END]] ], [ [[ADD11:%.*]], [[OMP_INNER_FOR_INC:%.*]] ]
; CHECK-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
; CHECK-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[DOTOMP_IV_0]], [[TMP9]]
; CHECK-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]], label [[OMP_INNER_FOR_BODY:%.*]]
; CHECK:       omp.inner.for.cond.cleanup:
; CHECK-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
; CHECK:       omp.inner.for.body:
; CHECK-NEXT:    [[ADD10:%.*]] = add nsw i32 [[DOTOMP_IV_0]], 2
; CHECK-NEXT:    [[TMP10:%.*]] = load float, float* [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP11:%.*]] = load double, double* [[CONV]], align 8
; CHECK-NEXT:    call void @bar(i32 [[ADD10]], float [[TMP10]], double [[TMP11]])
; CHECK-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
; CHECK:       omp.body.continue:
; CHECK-NEXT:    br label [[OMP_INNER_FOR_INC]]
; CHECK:       omp.inner.for.inc:
; CHECK-NEXT:    [[ADD11]] = add nsw i32 [[DOTOMP_IV_0]], 1
; CHECK-NEXT:    br label [[OMP_INNER_FOR_COND]]
; CHECK:       omp.inner.for.end:
; CHECK-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
; CHECK:       omp.loop.exit:
; CHECK-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTGLOBAL_TID_]], align 4
; CHECK-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* nonnull @0, i32 [[TMP12]])
; CHECK-NEXT:    br label [[OMP_PRECOND_END]]
; CHECK:       omp.precond.end:
; CHECK-NEXT:    ret void
;
entry:
  %q.addr = alloca i64, align 8
  %.omp.lb = alloca i32, align 4
  %.omp.ub = alloca i32, align 4
  %.omp.stride = alloca i32, align 4
  %.omp.is_last = alloca i32, align 4
  store i64 %q, i64* %q.addr, align 8
  %conv = bitcast i64* %q.addr to double*
  %tmp = load i32, i32* %N, align 4
  %sub3 = add nsw i32 %tmp, -3
  %cmp = icmp sgt i32 %tmp, 2
  br i1 %cmp, label %omp.precond.then, label %omp.precond.end

omp.precond.then:                                 ; preds = %entry
  store i32 0, i32* %.omp.lb, align 4
  store i32 %sub3, i32* %.omp.ub, align 4
  store i32 1, i32* %.omp.stride, align 4
  store i32 0, i32* %.omp.is_last, align 4
  %tmp5 = load i32, i32* %.global_tid., align 4
  call void @__kmpc_for_static_init_4(%struct.ident_t* nonnull @0, i32 %tmp5, i32 34, i32* nonnull %.omp.is_last, i32* nonnull %.omp.lb, i32* nonnull %.omp.ub, i32* nonnull %.omp.stride, i32 1, i32 1)
  %tmp6 = load i32, i32* %.omp.ub, align 4
  %cmp6 = icmp sgt i32 %tmp6, %sub3
  br i1 %cmp6, label %cond.true, label %cond.false

cond.true:                                        ; preds = %omp.precond.then
  br label %cond.end

cond.false:                                       ; preds = %omp.precond.then
  %tmp7 = load i32, i32* %.omp.ub, align 4
  br label %cond.end

cond.end:                                         ; preds = %cond.false, %cond.true
  %cond = phi i32 [ %sub3, %cond.true ], [ %tmp7, %cond.false ]
  store i32 %cond, i32* %.omp.ub, align 4
  %tmp8 = load i32, i32* %.omp.lb, align 4
  br label %omp.inner.for.cond

omp.inner.for.cond:                               ; preds = %omp.inner.for.inc, %cond.end
  %.omp.iv.0 = phi i32 [ %tmp8, %cond.end ], [ %add11, %omp.inner.for.inc ]
  %tmp9 = load i32, i32* %.omp.ub, align 4
  %cmp8 = icmp sgt i32 %.omp.iv.0, %tmp9
  br i1 %cmp8, label %omp.inner.for.cond.cleanup, label %omp.inner.for.body

omp.inner.for.cond.cleanup:                       ; preds = %omp.inner.for.cond
  br label %omp.inner.for.end

omp.inner.for.body:                               ; preds = %omp.inner.for.cond
  %add10 = add nsw i32 %.omp.iv.0, 2
  %tmp10 = load float, float* %p, align 4
  %tmp11 = load double, double* %conv, align 8
  call void @bar(i32 %add10, float %tmp10, double %tmp11)
  br label %omp.body.continue

omp.body.continue:                                ; preds = %omp.inner.for.body
  br label %omp.inner.for.inc

omp.inner.for.inc:                                ; preds = %omp.body.continue
  %add11 = add nsw i32 %.omp.iv.0, 1
  br label %omp.inner.for.cond

omp.inner.for.end:                                ; preds = %omp.inner.for.cond.cleanup
  br label %omp.loop.exit

omp.loop.exit:                                    ; preds = %omp.inner.for.end
  %tmp12 = load i32, i32* %.global_tid., align 4
  call void @__kmpc_for_static_fini(%struct.ident_t* nonnull @0, i32 %tmp12)
  br label %omp.precond.end

omp.precond.end:                                  ; preds = %omp.loop.exit, %entry
  ret void
}

declare dso_local void @__kmpc_for_static_init_4(%struct.ident_t*, i32, i32, i32*, i32*, i32*, i32*, i32, i32)

declare dso_local void @bar(i32, float, double)

declare dso_local void @__kmpc_for_static_fini(%struct.ident_t*, i32)

declare !callback !0 dso_local void @__kmpc_fork_call(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...)

!1 = !{i64 2, i64 -1, i64 -1, i1 true}
!0 = !{!1}
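
; Callback encoding !1, following the '!callback' description in the LLVM
; LangRef: the first operand (i64 2) marks the third argument of the broker
; @__kmpc_fork_call as the callback callee; the two i64 -1 operands mark
; callee parameters (%.global_tid., %.bound_tid.) whose values are unknown,
; i.e. produced by the runtime rather than taken from the broker's arguments;
; and i1 true says the broker's variadic arguments are passed through to the
; callee's remaining parameters.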