1; NOTE: Assertions have been autogenerated by utils/update_test_checks.py 2; RUN: opt < %s -basic-aa -slp-vectorizer -S | FileCheck %s 3target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" 4target triple = "x86_64-apple-macosx10.9.0" 5 6@A = common global [2000 x double] zeroinitializer, align 16 7@B = common global [2000 x double] zeroinitializer, align 16 8@C = common global [2000 x float] zeroinitializer, align 16 9@D = common global [2000 x float] zeroinitializer, align 16 10 11; Function Attrs: nounwind ssp uwtable 12define void @foo_3double(i32 %u) #0 { 13; CHECK-LABEL: @foo_3double( 14; CHECK-NEXT: entry: 15; CHECK-NEXT: [[U_ADDR:%.*]] = alloca i32, align 4 16; CHECK-NEXT: store i32 [[U:%.*]], i32* [[U_ADDR]], align 4 17; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[U]], 3 18; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64 19; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]] 20; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]] 21; CHECK-NEXT: [[ADD11:%.*]] = add nsw i32 [[MUL]], 1 22; CHECK-NEXT: [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64 23; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]] 24; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 25; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8 26; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]] 27; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>* 28; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8 29; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]] 30; CHECK-NEXT: [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 31; CHECK-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8 32; CHECK-NEXT: [[ADD24:%.*]] = add nsw i32 [[MUL]], 2 33; CHECK-NEXT: [[IDXPROM25:%.*]] = sext i32 [[ADD24]] to i64 34; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM25]] 35; CHECK-NEXT: [[TMP6:%.*]] = load double, double* [[ARRAYIDX26]], align 8 36; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM25]] 37; CHECK-NEXT: [[TMP7:%.*]] = load double, double* [[ARRAYIDX30]], align 8 38; CHECK-NEXT: [[ADD31:%.*]] = fadd double [[TMP6]], [[TMP7]] 39; CHECK-NEXT: store double [[ADD31]], double* [[ARRAYIDX26]], align 8 40; CHECK-NEXT: ret void 41; 42entry: 43 %u.addr = alloca i32, align 4 44 store i32 %u, i32* %u.addr, align 4 45 %mul = mul nsw i32 %u, 3 46 %idxprom = sext i32 %mul to i64 47 %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom 48 %0 = load double, double* %arrayidx, align 8 49 %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom 50 %1 = load double, double* %arrayidx4, align 8 51 %add5 = fadd double %0, %1 52 store double %add5, double* %arrayidx, align 8 53 %add11 = add nsw i32 %mul, 1 54 %idxprom12 = sext i32 %add11 to i64 55 %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12 56 %2 = load double, double* %arrayidx13, align 8 57 %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12 58 %3 = load double, double* %arrayidx17, align 8 59 %add18 = fadd double %2, %3 60 store double %add18, double* %arrayidx13, align 8 61 %add24 = add nsw i32 %mul, 2 62 %idxprom25 = sext i32 %add24 to i64 63 %arrayidx26 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom25 64 %4 = load double, double* %arrayidx26, align 8 65 %arrayidx30 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom25 66 %5 = load double, double* %arrayidx30, align 8 67 %add31 = fadd double %4, %5 68 store double %add31, double* %arrayidx26, align 8 69 ret void 70} 71 72; SCEV should be able to tell that accesses A[C1 + C2*i], A[C1 + C2*i], ... 73; A[C1 + C2*i] are consecutive, if C2 is a power of 2, and C2 > C1 > 0. 74; Thus, the following code should be vectorized. 75; Function Attrs: nounwind ssp uwtable 76define void @foo_2double(i32 %u) #0 { 77; CHECK-LABEL: @foo_2double( 78; CHECK-NEXT: entry: 79; CHECK-NEXT: [[U_ADDR:%.*]] = alloca i32, align 4 80; CHECK-NEXT: store i32 [[U:%.*]], i32* [[U_ADDR]], align 4 81; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[U]], 2 82; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64 83; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]] 84; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]] 85; CHECK-NEXT: [[ADD11:%.*]] = add nsw i32 [[MUL]], 1 86; CHECK-NEXT: [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64 87; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]] 88; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 89; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8 90; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]] 91; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>* 92; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8 93; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]] 94; CHECK-NEXT: [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 95; CHECK-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8 96; CHECK-NEXT: ret void 97; 98entry: 99 %u.addr = alloca i32, align 4 100 store i32 %u, i32* %u.addr, align 4 101 %mul = mul nsw i32 %u, 2 102 %idxprom = sext i32 %mul to i64 103 %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom 104 %0 = load double, double* %arrayidx, align 8 105 %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom 106 %1 = load double, double* %arrayidx4, align 8 107 %add5 = fadd double %0, %1 108 store double %add5, double* %arrayidx, align 8 109 %add11 = add nsw i32 %mul, 1 110 %idxprom12 = sext i32 %add11 to i64 111 %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12 112 %2 = load double, double* %arrayidx13, align 8 113 %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12 114 %3 = load double, double* %arrayidx17, align 8 115 %add18 = fadd double %2, %3 116 store double %add18, double* %arrayidx13, align 8 117 ret void 118} 119 120; Similar to the previous test, but with different datatype. 121; Function Attrs: nounwind ssp uwtable 122define void @foo_4float(i32 %u) #0 { 123; CHECK-LABEL: @foo_4float( 124; CHECK-NEXT: entry: 125; CHECK-NEXT: [[U_ADDR:%.*]] = alloca i32, align 4 126; CHECK-NEXT: store i32 [[U:%.*]], i32* [[U_ADDR]], align 4 127; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[U]], 4 128; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64 129; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM]] 130; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM]] 131; CHECK-NEXT: [[ADD11:%.*]] = add nsw i32 [[MUL]], 1 132; CHECK-NEXT: [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64 133; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM12]] 134; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM12]] 135; CHECK-NEXT: [[ADD24:%.*]] = add nsw i32 [[MUL]], 2 136; CHECK-NEXT: [[IDXPROM25:%.*]] = sext i32 [[ADD24]] to i64 137; CHECK-NEXT: [[ARRAYIDX26:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM25]] 138; CHECK-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM25]] 139; CHECK-NEXT: [[ADD37:%.*]] = add nsw i32 [[MUL]], 3 140; CHECK-NEXT: [[IDXPROM38:%.*]] = sext i32 [[ADD37]] to i64 141; CHECK-NEXT: [[ARRAYIDX39:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 [[IDXPROM38]] 142; CHECK-NEXT: [[TMP0:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>* 143; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4 144; CHECK-NEXT: [[ARRAYIDX43:%.*]] = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 [[IDXPROM38]] 145; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[ARRAYIDX4]] to <4 x float>* 146; CHECK-NEXT: [[TMP3:%.*]] = load <4 x float>, <4 x float>* [[TMP2]], align 4 147; CHECK-NEXT: [[TMP4:%.*]] = fadd <4 x float> [[TMP1]], [[TMP3]] 148; CHECK-NEXT: [[TMP5:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>* 149; CHECK-NEXT: store <4 x float> [[TMP4]], <4 x float>* [[TMP5]], align 4 150; CHECK-NEXT: ret void 151; 152entry: 153 %u.addr = alloca i32, align 4 154 store i32 %u, i32* %u.addr, align 4 155 %mul = mul nsw i32 %u, 4 156 %idxprom = sext i32 %mul to i64 157 %arrayidx = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom 158 %0 = load float, float* %arrayidx, align 4 159 %arrayidx4 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom 160 %1 = load float, float* %arrayidx4, align 4 161 %add5 = fadd float %0, %1 162 store float %add5, float* %arrayidx, align 4 163 %add11 = add nsw i32 %mul, 1 164 %idxprom12 = sext i32 %add11 to i64 165 %arrayidx13 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom12 166 %2 = load float, float* %arrayidx13, align 4 167 %arrayidx17 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom12 168 %3 = load float, float* %arrayidx17, align 4 169 %add18 = fadd float %2, %3 170 store float %add18, float* %arrayidx13, align 4 171 %add24 = add nsw i32 %mul, 2 172 %idxprom25 = sext i32 %add24 to i64 173 %arrayidx26 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom25 174 %4 = load float, float* %arrayidx26, align 4 175 %arrayidx30 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom25 176 %5 = load float, float* %arrayidx30, align 4 177 %add31 = fadd float %4, %5 178 store float %add31, float* %arrayidx26, align 4 179 %add37 = add nsw i32 %mul, 3 180 %idxprom38 = sext i32 %add37 to i64 181 %arrayidx39 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom38 182 %6 = load float, float* %arrayidx39, align 4 183 %arrayidx43 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom38 184 %7 = load float, float* %arrayidx43, align 4 185 %add44 = fadd float %6, %7 186 store float %add44, float* %arrayidx39, align 4 187 ret void 188} 189 190; Similar to the previous tests, but now we are dealing with AddRec SCEV. 191; Function Attrs: nounwind ssp uwtable 192define i32 @foo_loop(double* %A, i32 %n) #0 { 193; CHECK-LABEL: @foo_loop( 194; CHECK-NEXT: entry: 195; CHECK-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8 196; CHECK-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 197; CHECK-NEXT: [[SUM:%.*]] = alloca double, align 8 198; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 199; CHECK-NEXT: store double* [[A:%.*]], double** [[A_ADDR]], align 8 200; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_ADDR]], align 4 201; CHECK-NEXT: store double 0.000000e+00, double* [[SUM]], align 8 202; CHECK-NEXT: store i32 0, i32* [[I]], align 4 203; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 0, [[N]] 204; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]] 205; CHECK: for.body.lr.ph: 206; CHECK-NEXT: br label [[FOR_BODY:%.*]] 207; CHECK: for.body: 208; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] 209; CHECK-NEXT: [[TMP1:%.*]] = phi double [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD7:%.*]], [[FOR_BODY]] ] 210; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP0]], 2 211; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[MUL]] to i64 212; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM]] 213; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[MUL]], 1 214; CHECK-NEXT: [[IDXPROM3:%.*]] = sext i32 [[ADD]] to i64 215; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM3]] 216; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 217; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8 218; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]] 219; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0 220; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1 221; CHECK-NEXT: [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]] 222; CHECK-NEXT: [[ADD7]] = fadd double [[TMP1]], [[ADD6]] 223; CHECK-NEXT: store double [[ADD7]], double* [[SUM]], align 8 224; CHECK-NEXT: [[INC]] = add nsw i32 [[TMP0]], 1 225; CHECK-NEXT: store i32 [[INC]], i32* [[I]], align 4 226; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], [[N]] 227; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]] 228; CHECK: for.cond.for.end_crit_edge: 229; CHECK-NEXT: [[SPLIT:%.*]] = phi double [ [[ADD7]], [[FOR_BODY]] ] 230; CHECK-NEXT: br label [[FOR_END]] 231; CHECK: for.end: 232; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi double [ [[SPLIT]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0.000000e+00, [[ENTRY:%.*]] ] 233; CHECK-NEXT: [[CONV:%.*]] = fptosi double [[DOTLCSSA]] to i32 234; CHECK-NEXT: ret i32 [[CONV]] 235; 236entry: 237 %A.addr = alloca double*, align 8 238 %n.addr = alloca i32, align 4 239 %sum = alloca double, align 8 240 %i = alloca i32, align 4 241 store double* %A, double** %A.addr, align 8 242 store i32 %n, i32* %n.addr, align 4 243 store double 0.000000e+00, double* %sum, align 8 244 store i32 0, i32* %i, align 4 245 %cmp1 = icmp slt i32 0, %n 246 br i1 %cmp1, label %for.body.lr.ph, label %for.end 247 248for.body.lr.ph: ; preds = %entry 249 br label %for.body 250 251for.body: ; preds = %for.body.lr.ph, %for.body 252 %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] 253 %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ] 254 %mul = mul nsw i32 %0, 2 255 %idxprom = sext i32 %mul to i64 256 %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom 257 %2 = load double, double* %arrayidx, align 8 258 %mul1 = fmul double 7.000000e+00, %2 259 %add = add nsw i32 %mul, 1 260 %idxprom3 = sext i32 %add to i64 261 %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3 262 %3 = load double, double* %arrayidx4, align 8 263 %mul5 = fmul double 7.000000e+00, %3 264 %add6 = fadd double %mul1, %mul5 265 %add7 = fadd double %1, %add6 266 store double %add7, double* %sum, align 8 267 %inc = add nsw i32 %0, 1 268 store i32 %inc, i32* %i, align 4 269 %cmp = icmp slt i32 %inc, %n 270 br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge 271 272for.cond.for.end_crit_edge: ; preds = %for.body 273 %split = phi double [ %add7, %for.body ] 274 br label %for.end 275 276for.end: ; preds = %for.cond.for.end_crit_edge, %entry 277 %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ] 278 %conv = fptosi double %.lcssa to i32 279 ret i32 %conv 280} 281 282; Similar to foo_2double but with a non-power-of-2 factor and potential 283; wrapping (both indices wrap or both don't in the same time) 284; Function Attrs: nounwind ssp uwtable 285define void @foo_2double_non_power_of_2(i32 %u) #0 { 286; CHECK-LABEL: @foo_2double_non_power_of_2( 287; CHECK-NEXT: entry: 288; CHECK-NEXT: [[U_ADDR:%.*]] = alloca i32, align 4 289; CHECK-NEXT: store i32 [[U:%.*]], i32* [[U_ADDR]], align 4 290; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[U]], 6 291; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[MUL]], 6 292; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD6]] to i64 293; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]] 294; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]] 295; CHECK-NEXT: [[ADD7:%.*]] = add i32 [[MUL]], 7 296; CHECK-NEXT: [[IDXPROM12:%.*]] = sext i32 [[ADD7]] to i64 297; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]] 298; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 299; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8 300; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]] 301; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>* 302; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8 303; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]] 304; CHECK-NEXT: [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 305; CHECK-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8 306; CHECK-NEXT: ret void 307; 308entry: 309 %u.addr = alloca i32, align 4 310 store i32 %u, i32* %u.addr, align 4 311 %mul = mul i32 %u, 6 312 %add6 = add i32 %mul, 6 313 %idxprom = sext i32 %add6 to i64 314 %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom 315 %0 = load double, double* %arrayidx, align 8 316 %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom 317 %1 = load double, double* %arrayidx4, align 8 318 %add5 = fadd double %0, %1 319 store double %add5, double* %arrayidx, align 8 320 %add7 = add i32 %mul, 7 321 %idxprom12 = sext i32 %add7 to i64 322 %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12 323 %2 = load double, double* %arrayidx13, align 8 324 %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12 325 %3 = load double, double* %arrayidx17, align 8 326 %add18 = fadd double %2, %3 327 store double %add18, double* %arrayidx13, align 8 328 ret void 329} 330 331; Similar to foo_2double_non_power_of_2 but with zext's instead of sext's 332; Function Attrs: nounwind ssp uwtable 333define void @foo_2double_non_power_of_2_zext(i32 %u) #0 { 334; CHECK-LABEL: @foo_2double_non_power_of_2_zext( 335; CHECK-NEXT: entry: 336; CHECK-NEXT: [[U_ADDR:%.*]] = alloca i32, align 4 337; CHECK-NEXT: store i32 [[U:%.*]], i32* [[U_ADDR]], align 4 338; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[U]], 6 339; CHECK-NEXT: [[ADD6:%.*]] = add i32 [[MUL]], 6 340; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[ADD6]] to i64 341; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM]] 342; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM]] 343; CHECK-NEXT: [[ADD7:%.*]] = add i32 [[MUL]], 7 344; CHECK-NEXT: [[IDXPROM12:%.*]] = zext i32 [[ADD7]] to i64 345; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 [[IDXPROM12]] 346; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 347; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8 348; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 [[IDXPROM12]] 349; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[ARRAYIDX4]] to <2 x double>* 350; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8 351; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]] 352; CHECK-NEXT: [[TMP5:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 353; CHECK-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8 354; CHECK-NEXT: ret void 355; 356entry: 357 %u.addr = alloca i32, align 4 358 store i32 %u, i32* %u.addr, align 4 359 %mul = mul i32 %u, 6 360 %add6 = add i32 %mul, 6 361 %idxprom = zext i32 %add6 to i64 362 %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom 363 %0 = load double, double* %arrayidx, align 8 364 %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom 365 %1 = load double, double* %arrayidx4, align 8 366 %add5 = fadd double %0, %1 367 store double %add5, double* %arrayidx, align 8 368 %add7 = add i32 %mul, 7 369 %idxprom12 = zext i32 %add7 to i64 370 %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12 371 %2 = load double, double* %arrayidx13, align 8 372 %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12 373 %3 = load double, double* %arrayidx17, align 8 374 %add18 = fadd double %2, %3 375 store double %add18, double* %arrayidx13, align 8 376 ret void 377} 378 379; Similar to foo_2double_non_power_of_2, but now we are dealing with AddRec SCEV. 380; Alternatively, this is like foo_loop, but with a non-power-of-2 factor and 381; potential wrapping (both indices wrap or both don't in the same time) 382; Function Attrs: nounwind ssp uwtable 383define i32 @foo_loop_non_power_of_2(double* %A, i32 %n) #0 { 384; CHECK-LABEL: @foo_loop_non_power_of_2( 385; CHECK-NEXT: entry: 386; CHECK-NEXT: [[A_ADDR:%.*]] = alloca double*, align 8 387; CHECK-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 388; CHECK-NEXT: [[SUM:%.*]] = alloca double, align 8 389; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4 390; CHECK-NEXT: store double* [[A:%.*]], double** [[A_ADDR]], align 8 391; CHECK-NEXT: store i32 [[N:%.*]], i32* [[N_ADDR]], align 4 392; CHECK-NEXT: store double 0.000000e+00, double* [[SUM]], align 8 393; CHECK-NEXT: store i32 0, i32* [[I]], align 4 394; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 0, [[N]] 395; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]] 396; CHECK: for.body.lr.ph: 397; CHECK-NEXT: br label [[FOR_BODY:%.*]] 398; CHECK: for.body: 399; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 0, [[FOR_BODY_LR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] 400; CHECK-NEXT: [[TMP1:%.*]] = phi double [ 0.000000e+00, [[FOR_BODY_LR_PH]] ], [ [[ADD7:%.*]], [[FOR_BODY]] ] 401; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TMP0]], 12 402; CHECK-NEXT: [[ADD_5:%.*]] = add i32 [[MUL]], 5 403; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD_5]] to i64 404; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM]] 405; CHECK-NEXT: [[ADD_6:%.*]] = add i32 [[MUL]], 6 406; CHECK-NEXT: [[IDXPROM3:%.*]] = sext i32 [[ADD_6]] to i64 407; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM3]] 408; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 409; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8 410; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x double> <double 7.000000e+00, double 7.000000e+00>, [[TMP3]] 411; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[TMP4]], i32 0 412; CHECK-NEXT: [[TMP6:%.*]] = extractelement <2 x double> [[TMP4]], i32 1 413; CHECK-NEXT: [[ADD6:%.*]] = fadd double [[TMP5]], [[TMP6]] 414; CHECK-NEXT: [[ADD7]] = fadd double [[TMP1]], [[ADD6]] 415; CHECK-NEXT: store double [[ADD7]], double* [[SUM]], align 8 416; CHECK-NEXT: [[INC]] = add i32 [[TMP0]], 1 417; CHECK-NEXT: store i32 [[INC]], i32* [[I]], align 4 418; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], [[N]] 419; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_FOR_END_CRIT_EDGE:%.*]] 420; CHECK: for.cond.for.end_crit_edge: 421; CHECK-NEXT: [[SPLIT:%.*]] = phi double [ [[ADD7]], [[FOR_BODY]] ] 422; CHECK-NEXT: br label [[FOR_END]] 423; CHECK: for.end: 424; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi double [ [[SPLIT]], [[FOR_COND_FOR_END_CRIT_EDGE]] ], [ 0.000000e+00, [[ENTRY:%.*]] ] 425; CHECK-NEXT: [[CONV:%.*]] = fptosi double [[DOTLCSSA]] to i32 426; CHECK-NEXT: ret i32 [[CONV]] 427; 428entry: 429 %A.addr = alloca double*, align 8 430 %n.addr = alloca i32, align 4 431 %sum = alloca double, align 8 432 %i = alloca i32, align 4 433 store double* %A, double** %A.addr, align 8 434 store i32 %n, i32* %n.addr, align 4 435 store double 0.000000e+00, double* %sum, align 8 436 store i32 0, i32* %i, align 4 437 %cmp1 = icmp slt i32 0, %n 438 br i1 %cmp1, label %for.body.lr.ph, label %for.end 439 440for.body.lr.ph: ; preds = %entry 441 br label %for.body 442 443for.body: ; preds = %for.body.lr.ph, %for.body 444 %0 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ] 445 %1 = phi double [ 0.000000e+00, %for.body.lr.ph ], [ %add7, %for.body ] 446 %mul = mul i32 %0, 12 447 %add.5 = add i32 %mul, 5 448 %idxprom = sext i32 %add.5 to i64 449 %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom 450 %2 = load double, double* %arrayidx, align 8 451 %mul1 = fmul double 7.000000e+00, %2 452 %add.6 = add i32 %mul, 6 453 %idxprom3 = sext i32 %add.6 to i64 454 %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3 455 %3 = load double, double* %arrayidx4, align 8 456 %mul5 = fmul double 7.000000e+00, %3 457 %add6 = fadd double %mul1, %mul5 458 %add7 = fadd double %1, %add6 459 store double %add7, double* %sum, align 8 460 %inc = add i32 %0, 1 461 store i32 %inc, i32* %i, align 4 462 %cmp = icmp slt i32 %inc, %n 463 br i1 %cmp, label %for.body, label %for.cond.for.end_crit_edge 464 465for.cond.for.end_crit_edge: ; preds = %for.body 466 %split = phi double [ %add7, %for.body ] 467 br label %for.end 468 469for.end: ; preds = %for.cond.for.end_crit_edge, %entry 470 %.lcssa = phi double [ %split, %for.cond.for.end_crit_edge ], [ 0.000000e+00, %entry ] 471 %conv = fptosi double %.lcssa to i32 472 ret i32 %conv 473} 474 475; This is generated by `clang -std=c11 -Wpedantic -Wall -O3 main.c -S -o - -emit-llvm` 476; with !{!"clang version 7.0.0 (trunk 337339) (llvm/trunk 337344)"} and stripping off 477; the !tbaa metadata nodes to fit the rest of the test file, where `cat main.c` is: 478; 479; double bar(double *a, unsigned n) { 480; double x = 0.0; 481; double y = 0.0; 482; for (unsigned i = 0; i < n; i += 2) { 483; x += a[i]; 484; y += a[i + 1]; 485; } 486; return x * y; 487; } 488; 489; The resulting IR is similar to @foo_loop, but with zext's instead of sext's. 490; 491; Make sure we are able to vectorize this from now on: 492; 493define double @bar(double* nocapture readonly %a, i32 %n) local_unnamed_addr #0 { 494; CHECK-LABEL: @bar( 495; CHECK-NEXT: entry: 496; CHECK-NEXT: [[CMP15:%.*]] = icmp eq i32 [[N:%.*]], 0 497; CHECK-NEXT: br i1 [[CMP15]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY:%.*]] 498; CHECK: for.cond.cleanup: 499; CHECK-NEXT: [[TMP0:%.*]] = phi <2 x double> [ zeroinitializer, [[ENTRY:%.*]] ], [ [[TMP6:%.*]], [[FOR_BODY]] ] 500; CHECK-NEXT: [[TMP1:%.*]] = extractelement <2 x double> [[TMP0]], i32 0 501; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP0]], i32 1 502; CHECK-NEXT: [[MUL:%.*]] = fmul double [[TMP1]], [[TMP2]] 503; CHECK-NEXT: ret double [[MUL]] 504; CHECK: for.body: 505; CHECK-NEXT: [[I_018:%.*]] = phi i32 [ [[ADD5:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY]] ] 506; CHECK-NEXT: [[TMP3:%.*]] = phi <2 x double> [ [[TMP6]], [[FOR_BODY]] ], [ zeroinitializer, [[ENTRY]] ] 507; CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[I_018]] to i64 508; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[A:%.*]], i64 [[IDXPROM]] 509; CHECK-NEXT: [[ADD1:%.*]] = or i32 [[I_018]], 1 510; CHECK-NEXT: [[IDXPROM2:%.*]] = zext i32 [[ADD1]] to i64 511; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 [[IDXPROM2]] 512; CHECK-NEXT: [[TMP4:%.*]] = bitcast double* [[ARRAYIDX]] to <2 x double>* 513; CHECK-NEXT: [[TMP5:%.*]] = load <2 x double>, <2 x double>* [[TMP4]], align 8 514; CHECK-NEXT: [[TMP6]] = fadd <2 x double> [[TMP3]], [[TMP5]] 515; CHECK-NEXT: [[ADD5]] = add i32 [[I_018]], 2 516; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[ADD5]], [[N]] 517; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP]] 518; 519entry: 520 %cmp15 = icmp eq i32 %n, 0 521 br i1 %cmp15, label %for.cond.cleanup, label %for.body 522 523for.cond.cleanup: ; preds = %for.body, %entry 524 %x.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add, %for.body ] 525 %y.0.lcssa = phi double [ 0.000000e+00, %entry ], [ %add4, %for.body ] 526 %mul = fmul double %x.0.lcssa, %y.0.lcssa 527 ret double %mul 528 529for.body: ; preds = %entry, %for.body 530 %i.018 = phi i32 [ %add5, %for.body ], [ 0, %entry ] 531 %y.017 = phi double [ %add4, %for.body ], [ 0.000000e+00, %entry ] 532 %x.016 = phi double [ %add, %for.body ], [ 0.000000e+00, %entry ] 533 %idxprom = zext i32 %i.018 to i64 534 %arrayidx = getelementptr inbounds double, double* %a, i64 %idxprom 535 %0 = load double, double* %arrayidx, align 8 536 %add = fadd double %x.016, %0 537 %add1 = or i32 %i.018, 1 538 %idxprom2 = zext i32 %add1 to i64 539 %arrayidx3 = getelementptr inbounds double, double* %a, i64 %idxprom2 540 %1 = load double, double* %arrayidx3, align 8 541 %add4 = fadd double %y.017, %1 542 %add5 = add i32 %i.018, 2 543 %cmp = icmp ult i32 %add5, %n 544 br i1 %cmp, label %for.body, label %for.cond.cleanup 545} 546 547; Globals/constant expressions are not normal constants. 548; They should not be treated as the usual vectorization candidates. 549 550@g1 = external global i32, align 4 551@g2 = external global i32, align 4 552 553define void @PR33958(i32** nocapture %p) { 554; CHECK-LABEL: @PR33958( 555; CHECK-NEXT: store i32* @g1, i32** [[P:%.*]], align 8 556; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32*, i32** [[P]], i64 1 557; CHECK-NEXT: store i32* @g2, i32** [[ARRAYIDX1]], align 8 558; CHECK-NEXT: ret void 559; 560 store i32* @g1, i32** %p, align 8 561 %arrayidx1 = getelementptr inbounds i32*, i32** %p, i64 1 562 store i32* @g2, i32** %arrayidx1, align 8 563 ret void 564} 565 566define void @store_constant_expression(i64* %p) { 567; CHECK-LABEL: @store_constant_expression( 568; CHECK-NEXT: store i64 ptrtoint (i32* @g1 to i64), i64* [[P:%.*]], align 8 569; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 1 570; CHECK-NEXT: store i64 ptrtoint (i32* @g2 to i64), i64* [[ARRAYIDX1]], align 8 571; CHECK-NEXT: ret void 572; 573 store i64 ptrtoint (i32* @g1 to i64), i64* %p, align 8 574 %arrayidx1 = getelementptr inbounds i64, i64* %p, i64 1 575 store i64 ptrtoint (i32* @g2 to i64), i64* %arrayidx1, align 8 576 ret void 577} 578 579attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" } 580 581!llvm.ident = !{!0} 582 583!0 = !{!"clang version 3.5.0 "} 584