; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -basic-aa -loop-idiom < %s -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"

; For @test11_pattern
; CHECK: @.memset_pattern = private unnamed_addr constant [4 x i32] [i32 1, i32 1, i32 1, i32 1]

; For @test13_pattern
; CHECK: @.memset_pattern.1 = private unnamed_addr constant [2 x i32*] [i32* @G, i32* @G]

target triple = "x86_64-apple-darwin10.0.0"

define void @test1(i8* %Base, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test1(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph: ; preds = %entry
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
  store i8 0, i8* %I.0.014, align 1
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

; Make sure memset is formed for larger than 1 byte stores, and that the
; alignment of the store is preserved
define void @test1_i16(i16* align 2 %Base, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test1_i16(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE1:%.*]] = bitcast i16* [[BASE:%.*]] to i8*
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 1
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 2 [[BASE1]], i8 0, i64 [[TMP0]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i16, i16* [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph: ; preds = %entry
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %I.0.014 = getelementptr i16, i16* %Base, i64 %indvar
  store i16 0, i16* %I.0.014, align 2
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

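; A rough C equivalent of the loops in @test1 and @test1_i16 (illustrative
; sketch only; the signature and names are assumed, not taken from an original
; C source):
;
;   void test1(char *Base, long Size) {
;     for (long i = 0; i < Size; ++i)
;       Base[i] = 0;   // recognized by loop-idiom and raised to memset(Base, 0, Size)
;   }
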
; This is a loop that was rotated but where the blocks weren't merged. This
; shouldn't perturb us.
define void @test1a(i8* %Base, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test1a(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY_CONT:%.*]] ]
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: br label [[FOR_BODY_CONT]]
; CHECK: for.body.cont:
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph: ; preds = %entry
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body.cont ]
  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
  store i8 0, i8* %I.0.014, align 1
  %indvar.next = add i64 %indvar, 1
  br label %for.body.cont
for.body.cont:
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}


define void @test2(i32* %Base, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[BASE1:%.*]] = bitcast i32* [[BASE:%.*]] to i8*
; CHECK-NEXT: [[CMP10:%.*]] = icmp eq i64 [[SIZE:%.*]], 0
; CHECK-NEXT: br i1 [[CMP10]], label [[FOR_END:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE]], 2
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[BASE1]], i8 1, i64 [[TMP0]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[I_011]]
; CHECK-NEXT: [[INC]] = add nsw i64 [[I_011]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  %cmp10 = icmp eq i64 %Size, 0
  br i1 %cmp10, label %for.end, label %for.body

for.body: ; preds = %entry, %for.body
  %i.011 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %add.ptr.i = getelementptr i32, i32* %Base, i64 %i.011
  store i32 16843009, i32* %add.ptr.i, align 4
  %inc = add nsw i64 %i.011, 1
  %exitcond = icmp eq i64 %inc, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

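; The splat store in @test2 above corresponds roughly to this C (illustrative
; sketch; 16843009 is 0x01010101, so every byte of the stored value is 1):
;
;   void test2(int *Base, long Size) {
;     for (long i = 0; i < Size; ++i)
;       Base[i] = 0x01010101;   // byte-splat constant -> memset(Base, 1, Size * 4)
;   }
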
; This is a case where there is an extra may-aliased store in the loop, so we
; can't promote the memset.
define void @test3(i32* %Base, i64 %Size, i8 *%MayAlias) nounwind ssp {
; CHECK-LABEL: @test3(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[I_011:%.*]] = phi i64 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[ADD_PTR_I:%.*]] = getelementptr i32, i32* [[BASE:%.*]], i64 [[I_011]]
; CHECK-NEXT: store i32 16843009, i32* [[ADD_PTR_I]], align 4
; CHECK-NEXT: store i8 42, i8* [[MAYALIAS:%.*]], align 1
; CHECK-NEXT: [[INC]] = add nsw i64 [[I_011]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INC]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %i.011 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
  %add.ptr.i = getelementptr i32, i32* %Base, i64 %i.011
  store i32 16843009, i32* %add.ptr.i, align 4

  store i8 42, i8* %MayAlias
  %inc = add nsw i64 %i.011, 1
  %exitcond = icmp eq i64 %inc, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %entry
  ret void
}

; Make sure the first store in the loop is turned into a memset.
define void @test4(i8* %Base) nounwind ssp {
; CHECK-LABEL: @test4(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE100:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 1000
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE]], i8 0, i64 100, i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: store i8 42, i8* [[BASE100]], align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 100
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph: ; preds = %entry
  %Base100 = getelementptr i8, i8* %Base, i64 1000
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
  store i8 0, i8* %I.0.014, align 1

  ;; This store is beyond the range of the memset, so it should be safe to promote.
  store i8 42, i8* %Base100

  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 100
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

; This can't be promoted: the memset would be a store of a loop-variant value.
define void @test5(i8* %Base, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test5(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i64 [[INDVAR]]
; CHECK-NEXT: [[V:%.*]] = trunc i64 [[INDVAR]] to i8
; CHECK-NEXT: store i8 [[V]], i8* [[I_0_014]], align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph: ; preds = %entry
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar

  %V = trunc i64 %indvar to i8
  store i8 %V, i8* %I.0.014, align 1
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}


;; memcpy formation
define void @test6(i64 %Size) nounwind ssp {
; CHECK-LABEL: @test6(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST]], i8* align 1 [[BASE]], i64 [[SIZE:%.*]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  %Base = alloca i8, i32 10000
  %Dest = alloca i8, i32 10000
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
  %V = load i8, i8* %I.0.014, align 1
  store i8 %V, i8* %DestI, align 1
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

;; memcpy formation, check alignment
define void @test6_dest_align(i32* noalias align 1 %Base, i32* noalias align 4 %Dest, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test6_dest_align(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[DEST1:%.*]] = bitcast i32* [[DEST:%.*]] to i8*
; CHECK-NEXT: [[BASE2:%.*]] = bitcast i32* [[BASE:%.*]] to i8*
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 2
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[DEST1]], i8* align 1 [[BASE2]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[I_0_014]], align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
  %V = load i32, i32* %I.0.014, align 1
  store i32 %V, i32* %DestI, align 4
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

;; memcpy formation, check alignment
define void @test6_src_align(i32* noalias align 4 %Base, i32* noalias align 1 %Dest, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test6_src_align(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[DEST1:%.*]] = bitcast i32* [[DEST:%.*]] to i8*
; CHECK-NEXT: [[BASE2:%.*]] = bitcast i32* [[BASE:%.*]] to i8*
; CHECK-NEXT: [[TMP0:%.*]] = shl nuw i64 [[SIZE:%.*]], 2
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST1]], i8* align 4 [[BASE2]], i64 [[TMP0]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i32, i32* [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i32, i32* [[DEST]], i64 [[INDVAR]]
; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[I_0_014]], align 4
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %I.0.014 = getelementptr i32, i32* %Base, i64 %indvar
  %DestI = getelementptr i32, i32* %Dest, i64 %indvar
  %V = load i32, i32* %I.0.014, align 4
  store i32 %V, i32* %DestI, align 1
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

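; The three @test6 variants above all implement roughly this C copy loop
; (illustrative sketch; names and signature assumed):
;
;   void test6_like(int *Dest, const int *Base, long Size) {
;     for (long i = 0; i < Size; ++i)
;       Dest[i] = Base[i];   // disjoint buffers -> llvm.memcpy of Size * 4 bytes
;   }
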
; This is a loop that was rotated but where the blocks weren't merged. This
; shouldn't perturb us.
define void @test7(i8* %Base, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test7(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[BASE:%.*]], i8 0, i64 [[SIZE:%.*]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY_CONT:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY_CONT]]
; CHECK: for.body.cont:
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph: ; preds = %entry
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body.cont ]
  br label %for.body.cont
for.body.cont:
  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
  store i8 0, i8* %I.0.014, align 1
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

; This loop should not be transformed; it only executes one iteration.
define void @test8(i64* %Ptr, i64 %Size) nounwind ssp {
; CHECK-LABEL: @test8(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[PI:%.*]] = getelementptr i64, i64* [[PTR:%.*]], i64 [[INDVAR]]
; CHECK-NEXT: store i64 0, i64* [[PI]], align 8
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 1
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph: ; preds = %entry
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %PI = getelementptr i64, i64* %Ptr, i64 %indvar
  store i64 0, i64 *%PI
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 1
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

declare i8* @external(i8*)

;; This cannot be transformed into a memcpy, because the read-from location is
;; mutated by the loop.
define void @test9(i64 %Size) nounwind ssp {
; CHECK-LABEL: @test9(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[BASE:%.*]] = alloca i8, i32 10000, align 1
; CHECK-NEXT: [[DEST:%.*]] = alloca i8, i32 10000, align 1
; CHECK-NEXT: [[BASEALIAS:%.*]] = call i8* @external(i8* [[BASE]])
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[I_0_014:%.*]] = getelementptr i8, i8* [[BASE]], i64 [[INDVAR]]
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[I_0_014]], align 1
; CHECK-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
; CHECK-NEXT: store i8 4, i8* [[BASEALIAS]], align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  %Base = alloca i8, i32 10000
  %Dest = alloca i8, i32 10000

  %BaseAlias = call i8* @external(i8* %Base)
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %I.0.014 = getelementptr i8, i8* %Base, i64 %indvar
  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
  %V = load i8, i8* %I.0.014, align 1
  store i8 %V, i8* %DestI, align 1

  ;; This store can clobber the input.
  store i8 4, i8* %BaseAlias

  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

; Two dimensional nested loop should be promoted to one big memset.
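; @test10 below is roughly this C nest (illustrative sketch; names assumed):
;
;   void test10(char *X) {
;     for (int i = 0; i < 100; ++i)
;       for (int j = 0; j < 100; ++j)
;         X[i * 100 + j] = 0;   // the whole nest collapses to memset(X, 0, 10000)
;   }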
define void @test10(i8* %X) nounwind ssp {
; CHECK-LABEL: @test10(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[X:%.*]], i8 0, i64 10000, i1 false)
; CHECK-NEXT: br label [[BB_NPH:%.*]]
; CHECK: bb.nph:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_INC10:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[I_04:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC12:%.*]], [[FOR_INC10]] ]
; CHECK-NEXT: [[TMP0:%.*]] = mul nuw nsw i64 [[INDVAR]], 100
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[X]], i64 [[TMP0]]
; CHECK-NEXT: br label [[FOR_BODY5:%.*]]
; CHECK: for.body5:
; CHECK-NEXT: [[J_02:%.*]] = phi i32 [ 0, [[BB_NPH]] ], [ [[INC:%.*]], [[FOR_BODY5]] ]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_04]], 100
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[J_02]], [[MUL]]
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[X]], i64 [[IDXPROM]]
; CHECK-NEXT: [[INC]] = add nsw i32 [[J_02]], 1
; CHECK-NEXT: [[CMP4:%.*]] = icmp eq i32 [[INC]], 100
; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_INC10]], label [[FOR_BODY5]]
; CHECK: for.inc10:
; CHECK-NEXT: [[INC12]] = add nsw i32 [[I_04]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[INC12]], 100
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_END13:%.*]], label [[BB_NPH]]
; CHECK: for.end13:
; CHECK-NEXT: ret void
;
entry:
  br label %bb.nph

bb.nph: ; preds = %entry, %for.inc10
  %i.04 = phi i32 [ 0, %entry ], [ %inc12, %for.inc10 ]
  br label %for.body5

for.body5: ; preds = %for.body5, %bb.nph
  %j.02 = phi i32 [ 0, %bb.nph ], [ %inc, %for.body5 ]
  %mul = mul nsw i32 %i.04, 100
  %add = add nsw i32 %j.02, %mul
  %idxprom = sext i32 %add to i64
  %arrayidx = getelementptr inbounds i8, i8* %X, i64 %idxprom
  store i8 0, i8* %arrayidx, align 1
  %inc = add nsw i32 %j.02, 1
  %cmp4 = icmp eq i32 %inc, 100
  br i1 %cmp4, label %for.inc10, label %for.body5

for.inc10: ; preds = %for.body5
  %inc12 = add nsw i32 %i.04, 1
  %cmp = icmp eq i32 %inc12, 100
  br i1 %cmp, label %for.end13, label %bb.nph

for.end13: ; preds = %for.inc10
  ret void
}

; On darwin10 (which is the triple in this .ll file) this loop can be turned
; into a memset_pattern call.
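; Roughly equivalent C (illustrative sketch; names assumed):
;
;   void test11_pattern(int *P) {
;     for (long i = 0; i < 10000; ++i)
;       P[i] = 1;   // repeating 4-byte pattern -> memset_pattern16
;   }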
; rdar://9009151
define void @test11_pattern(i32* nocapture %P) nounwind ssp {
; CHECK-LABEL: @test11_pattern(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = bitcast i32* [[P:%.*]] to i8*
; CHECK-NEXT: call void @memset_pattern16(i8* [[P1]], i8* bitcast ([4 x i32]* @.memset_pattern to i8*), i64 40000)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32, i32* [[P]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
  %arrayidx = getelementptr i32, i32* %P, i64 %indvar
  store i32 1, i32* %arrayidx, align 4
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 10000
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body
  ret void
}

; Store of null should turn into memset of zero.
define void @test12(i32** nocapture %P) nounwind ssp {
; CHECK-LABEL: @test12(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = bitcast i32** [[P:%.*]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[P1]], i8 0, i64 80000, i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32*, i32** [[P]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
  %arrayidx = getelementptr i32*, i32** %P, i64 %indvar
  store i32* null, i32** %arrayidx, align 4
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 10000
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body
  ret void
}

@G = global i32 5

; This store-of-address loop can be turned into a memset_pattern call.
; rdar://9009151
define void @test13_pattern(i32** nocapture %P) nounwind ssp {
; CHECK-LABEL: @test13_pattern(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[P1:%.*]] = bitcast i32** [[P:%.*]] to i8*
; CHECK-NEXT: call void @memset_pattern16(i8* [[P1]], i8* bitcast ([2 x i32*]* @.memset_pattern.1 to i8*), i64 80000)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr i32*, i32** [[P]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], 10000
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %for.body ]
  %arrayidx = getelementptr i32*, i32** %P, i64 %indvar
  store i32* @G, i32** %arrayidx, align 4
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 10000
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body
  ret void
}



; PR9815 - This is a partial overlap case that cannot be safely transformed
; into a memcpy.
@g_50 = global [7 x i32] [i32 0, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0], align 16

define i32 @test14() nounwind {
; CHECK-LABEL: @test14(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[T5:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[T5]], 4
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 [[IDXPROM]]
; CHECK-NEXT: [[T2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[T5]], 5
; CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[ADD4]] to i64
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 [[IDXPROM5]]
; CHECK-NEXT: store i32 [[T2]], i32* [[ARRAYIDX6]], align 4
; CHECK-NEXT: [[INC]] = add nsw i32 [[T5]], 1
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[INC]], 2
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.end:
; CHECK-NEXT: [[T8:%.*]] = load i32, i32* getelementptr inbounds ([7 x i32], [7 x i32]* @g_50, i32 0, i64 6), align 4
; CHECK-NEXT: ret i32 [[T8]]
;
entry:
  br label %for.body

for.body: ; preds = %for.inc, %for.body.lr.ph
  %t5 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
  %add = add nsw i32 %t5, 4
  %idxprom = sext i32 %add to i64
  %arrayidx = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom
  %t2 = load i32, i32* %arrayidx, align 4
  %add4 = add nsw i32 %t5, 5
  %idxprom5 = sext i32 %add4 to i64
  %arrayidx6 = getelementptr inbounds [7 x i32], [7 x i32]* @g_50, i32 0, i64 %idxprom5
  store i32 %t2, i32* %arrayidx6, align 4
  %inc = add nsw i32 %t5, 1
  %cmp = icmp slt i32 %inc, 2
  br i1 %cmp, label %for.body, label %for.end

for.end: ; preds = %for.inc
  %t8 = load i32, i32* getelementptr inbounds ([7 x i32], [7 x i32]* @g_50, i32 0, i64 6), align 4
  ret i32 %t8

}

define void @PR14241(i32* %s, i64 %size) {
; Ensure that we don't form a memcpy for strided loops. Briefly, when we taught
; LoopIdiom about memmove and strided loops, this got miscompiled into a memcpy
; instead of a memmove. If we get the memmove transform back, this will catch
; regressions.
;
; CHECK-LABEL: @PR14241(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[S1:%.*]] = bitcast i32* [[S:%.*]] to i8*
; CHECK-NEXT: [[END_IDX:%.*]] = add i64 [[SIZE:%.*]], -1
; CHECK-NEXT: [[END_PTR:%.*]] = getelementptr inbounds i32, i32* [[S]], i64 [[END_IDX]]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[S]], i64 1
; CHECK-NEXT: [[SCEVGEP2:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[SIZE]], 2
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], -8
; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 2
; CHECK-NEXT: [[TMP3:%.*]] = shl nuw i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[TMP3]], 4
; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 4 [[S1]], i8* align 4 [[SCEVGEP2]], i64 [[TMP4]], i1 false)
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
; CHECK-NEXT: [[PHI_PTR:%.*]] = phi i32* [ [[S]], [[ENTRY:%.*]] ], [ [[NEXT_PTR:%.*]], [[WHILE_BODY]] ]
; CHECK-NEXT: [[SRC_PTR:%.*]] = getelementptr inbounds i32, i32* [[PHI_PTR]], i64 1
; CHECK-NEXT: [[VAL:%.*]] = load i32, i32* [[SRC_PTR]], align 4
; CHECK-NEXT: [[DST_PTR:%.*]] = getelementptr inbounds i32, i32* [[PHI_PTR]], i64 0
; CHECK-NEXT: [[NEXT_PTR]] = getelementptr inbounds i32, i32* [[PHI_PTR]], i64 1
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32* [[NEXT_PTR]], [[END_PTR]]
; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[WHILE_BODY]]
; CHECK: exit:
; CHECK-NEXT: ret void
;

entry:
  %end.idx = add i64 %size, -1
  %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx
  br label %while.body

while.body:
  %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
  %src.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
  %val = load i32, i32* %src.ptr, align 4
  %dst.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 0
  store i32 %val, i32* %dst.ptr, align 4
  %next.ptr = getelementptr inbounds i32, i32* %phi.ptr, i64 1
  %cmp = icmp eq i32* %next.ptr, %end.ptr
  br i1 %cmp, label %exit, label %while.body

exit:
  ret void
}

; Recognize loops with a negative stride.
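; @test15 below counts down instead of up, roughly (illustrative C sketch):
;
;   void test15(int *f) {
;     for (long i = 65536; i >= 0; --i)
;       f[i] = 0;   // 65537 stores -> memset(f, 0, 262148)
;   }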
define void @test15(i32* nocapture %f) {
; CHECK-LABEL: @test15(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[F1:%.*]] = bitcast i32* [[F:%.*]] to i8*
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[F1]], i8 0, i64 262148, i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 65536, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body:
  %indvars.iv = phi i64 [ 65536, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
  store i32 0, i32* %arrayidx, align 4
  %indvars.iv.next = add nsw i64 %indvars.iv, -1
  %cmp = icmp sgt i64 %indvars.iv, 0
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:
  ret void
}

; Loop with a negative stride. Verify an aliasing write to f[65536] prevents
; the creation of a memset.
define void @test16(i32* nocapture %f) {
; CHECK-LABEL: @test16(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[F:%.*]], i64 65536
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 65536, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[F]], i64 [[INDVARS_IV]]
; CHECK-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: store i32 1, i32* [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
  %arrayidx1 = getelementptr inbounds i32, i32* %f, i64 65536
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 65536, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %f, i64 %indvars.iv
  store i32 0, i32* %arrayidx, align 4
  store i32 1, i32* %arrayidx1, align 4
  %indvars.iv.next = add nsw i64 %indvars.iv, -1
  %cmp = icmp sgt i64 %indvars.iv, 0
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup: ; preds = %for.body
  ret void
}

; Handle memcpy-able loops with negative stride.
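; @test17 below allocates the destination and copies backwards, roughly
; (illustrative C sketch; assumes <stdlib.h> for malloc):
;
;   int *test17(const int *a, int c) {
;     int *p = malloc(sizeof(int) * (long)c);
;     for (int i = c - 1; i >= 0; --i)
;       p[i] = a[i];   // backward copy between disjoint buffers -> memcpy
;     return p;
;   }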
define noalias i32* @test17(i32* nocapture readonly %a, i32 %c) {
; CHECK-LABEL: @test17(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CONV:%.*]] = sext i32 [[C:%.*]] to i64
; CHECK-NEXT: [[MUL:%.*]] = shl nsw i64 [[CONV]], 2
; CHECK-NEXT: [[CALL:%.*]] = tail call noalias i8* @malloc(i64 [[MUL]])
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[CALL]] to i32*
; CHECK-NEXT: [[TOBOOL_9:%.*]] = icmp eq i32 [[C]], 0
; CHECK-NEXT: br i1 [[TOBOOL_9]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
; CHECK: while.body.preheader:
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[C]], -1
; CHECK-NEXT: [[TMP2:%.*]] = sext i32 [[TMP1]] to i64
; CHECK-NEXT: [[TMP3:%.*]] = shl nsw i64 [[TMP2]], 2
; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP1]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 2
; CHECK-NEXT: [[TMP6:%.*]] = sub i64 [[TMP3]], [[TMP5]]
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[CALL]], i64 [[TMP6]]
; CHECK-NEXT: [[TMP7:%.*]] = sub i64 [[TMP2]], [[TMP4]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i32, i32* [[A:%.*]], i64 [[TMP7]]
; CHECK-NEXT: [[SCEVGEP12:%.*]] = bitcast i32* [[SCEVGEP1]] to i8*
; CHECK-NEXT: [[TMP8:%.*]] = zext i32 [[C]] to i64
; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP8]], 2
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[SCEVGEP]], i8* align 4 [[SCEVGEP12]], i64 [[TMP9]], i1 false)
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
; CHECK-NEXT: [[DEC10_IN:%.*]] = phi i32 [ [[DEC10:%.*]], [[WHILE_BODY]] ], [ [[C]], [[WHILE_BODY_PREHEADER]] ]
; CHECK-NEXT: [[DEC10]] = add nsw i32 [[DEC10_IN]], -1
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[DEC10]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IDXPROM]]
; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i64 [[IDXPROM]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[DEC10]], 0
; CHECK-NEXT: br i1 [[TOBOOL]], label [[WHILE_END_LOOPEXIT:%.*]], label [[WHILE_BODY]]
; CHECK: while.end.loopexit:
; CHECK-NEXT: br label [[WHILE_END]]
; CHECK: while.end:
; CHECK-NEXT: ret i32* [[TMP0]]
;
entry:
  %conv = sext i32 %c to i64
  %mul = shl nsw i64 %conv, 2
  %call = tail call noalias i8* @malloc(i64 %mul)
  %0 = bitcast i8* %call to i32*
  %tobool.9 = icmp eq i32 %c, 0
  br i1 %tobool.9, label %while.end, label %while.body.preheader

while.body.preheader: ; preds = %entry
  br label %while.body

while.body: ; preds = %while.body.preheader, %while.body
  %dec10.in = phi i32 [ %dec10, %while.body ], [ %c, %while.body.preheader ]
  %dec10 = add nsw i32 %dec10.in, -1
  %idxprom = sext i32 %dec10 to i64
  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
  %1 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %0, i64 %idxprom
  store i32 %1, i32* %arrayidx2, align 4
  %tobool = icmp eq i32 %dec10, 0
  br i1 %tobool, label %while.end.loopexit, label %while.body

while.end.loopexit: ; preds = %while.body
  br label %while.end

while.end: ; preds = %while.end.loopexit, %entry
  ret i32* %0
}

declare noalias i8* @malloc(i64)

; Handle memcpy-able loops with negative stride.
; void test18(unsigned *__restrict__ a, unsigned *__restrict__ b) {
;   for (int i = 2047; i >= 0; --i) {
;     a[i] = b[i];
;   }
; }
define void @test18(i32* noalias nocapture %a, i32* noalias nocapture readonly %b) #0 {
; CHECK-LABEL: @test18(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A1:%.*]] = bitcast i32* [[A:%.*]] to i8*
; CHECK-NEXT: [[B2:%.*]] = bitcast i32* [[B:%.*]] to i8*
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A1]], i8* align 4 [[B2]], i64 8192, i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 2047, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nsw i64 [[INDVARS_IV]], -1
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[INDVARS_IV]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
entry:
  br label %for.body

for.body: ; preds = %entry, %for.body
  %indvars.iv = phi i64 [ 2047, %entry ], [ %indvars.iv.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
  %0 = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
  store i32 %0, i32* %arrayidx2, align 4
  %indvars.iv.next = add nsw i64 %indvars.iv, -1
  %cmp = icmp sgt i64 %indvars.iv, 0
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup: ; preds = %for.body
  ret void
}

; Two dimensional nested loop with negative stride should be promoted to one big memset.
define void @test19(i8* nocapture %X) {
; CHECK-LABEL: @test19(
; CHECK-NEXT: entry:
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 1 [[X:%.*]], i8 0, i64 10000, i1 false)
; CHECK-NEXT: br label [[FOR_COND1_PREHEADER:%.*]]
; CHECK: for.cond1.preheader:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_INC4:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: [[I_06:%.*]] = phi i32 [ 99, [[ENTRY]] ], [ [[DEC5:%.*]], [[FOR_INC4]] ]
; CHECK-NEXT: [[TMP0:%.*]] = mul nsw i64 [[INDVAR]], -100
; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], 9900
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[X]], i64 [[TMP1]]
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[I_06]], 100
; CHECK-NEXT: br label [[FOR_BODY3:%.*]]
; CHECK: for.body3:
; CHECK-NEXT: [[J_05:%.*]] = phi i32 [ 99, [[FOR_COND1_PREHEADER]] ], [ [[DEC:%.*]], [[FOR_BODY3]] ]
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[J_05]], [[MUL]]
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[ADD]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i8, i8* [[X]], i64 [[IDXPROM]]
; CHECK-NEXT: [[DEC]] = add nsw i32 [[J_05]], -1
; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[J_05]], 0
; CHECK-NEXT: br i1 [[CMP2]], label [[FOR_BODY3]], label [[FOR_INC4]]
; CHECK: for.inc4:
; CHECK-NEXT: [[DEC5]] = add nsw i32 [[I_06]], -1
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[I_06]], 0
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: br i1 [[CMP]], label [[FOR_COND1_PREHEADER]], label [[FOR_END6:%.*]]
; CHECK: for.end6:
; CHECK-NEXT: ret void
;
entry:
  br label %for.cond1.preheader

for.cond1.preheader: ; preds = %entry, %for.inc4
  %i.06 = phi i32 [ 99, %entry ], [ %dec5, %for.inc4 ]
  %mul = mul nsw i32 %i.06, 100
  br label %for.body3

for.body3: ; preds = %for.cond1.preheader, %for.body3
  %j.05 = phi i32 [ 99, %for.cond1.preheader ], [ %dec, %for.body3 ]
  %add = add nsw i32 %j.05, %mul
  %idxprom = sext i32 %add to i64
  %arrayidx = getelementptr inbounds i8, i8* %X, i64 %idxprom
  store i8 0, i8* %arrayidx, align 1
  %dec = add nsw i32 %j.05, -1
  %cmp2 = icmp sgt i32 %j.05, 0
  br i1 %cmp2, label %for.body3, label %for.inc4

for.inc4: ; preds = %for.body3
  %dec5 = add nsw i32 %i.06, -1
  %cmp = icmp sgt i32 %i.06, 0
  br i1 %cmp, label %for.cond1.preheader, label %for.end6

for.end6: ; preds = %for.inc4
  ret void
}

; Handle loops where the trip count is a narrow integer that needs to be
; extended.
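; Roughly (illustrative C sketch; names assumed):
;
;   void form_memset_narrow_size(long *ptr, int size) {
;     for (int i = 0; i < size; ++i)   // 32-bit trip count, widened to i64 for the memset size
;       ptr[i] = 0;
;   }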
define void @form_memset_narrow_size(i64* %ptr, i32 %size) {
; CHECK-LABEL: @form_memset_narrow_size(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[PTR1:%.*]] = bitcast i64* [[PTR:%.*]] to i8*
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[SIZE:%.*]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[LOOP_PH:%.*]], label [[EXIT:%.*]]
; CHECK: loop.ph:
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[SIZE]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 8 [[PTR1]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT: br label [[LOOP_BODY:%.*]]
; CHECK: loop.body:
; CHECK-NEXT: [[STOREMERGE4:%.*]] = phi i32 [ 0, [[LOOP_PH]] ], [ [[INC:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[STOREMERGE4]] to i64
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, i64* [[PTR]], i64 [[IDXPROM]]
; CHECK-NEXT: [[INC]] = add nsw i32 [[STOREMERGE4]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[INC]], [[SIZE]]
; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_BODY]], label [[LOOP_EXIT:%.*]]
; CHECK: loop.exit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  %cmp1 = icmp sgt i32 %size, 0
  br i1 %cmp1, label %loop.ph, label %exit

loop.ph:
  br label %loop.body

loop.body:
  %storemerge4 = phi i32 [ 0, %loop.ph ], [ %inc, %loop.body ]
  %idxprom = sext i32 %storemerge4 to i64
  %arrayidx = getelementptr inbounds i64, i64* %ptr, i64 %idxprom
  store i64 0, i64* %arrayidx, align 8
  %inc = add nsw i32 %storemerge4, 1
  %cmp2 = icmp slt i32 %inc, %size
  br i1 %cmp2, label %loop.body, label %loop.exit

loop.exit:
  br label %exit

exit:
  ret void
}

define void @form_memcpy_narrow_size(i64* noalias %dst, i64* noalias %src, i32 %size) {
; CHECK-LABEL: @form_memcpy_narrow_size(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[DST1:%.*]] = bitcast i64* [[DST:%.*]] to i8*
; CHECK-NEXT: [[SRC2:%.*]] = bitcast i64* [[SRC:%.*]] to i8*
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[SIZE:%.*]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[LOOP_PH:%.*]], label [[EXIT:%.*]]
; CHECK: loop.ph:
; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[SIZE]] to i64
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DST1]], i8* align 8 [[SRC2]], i64 [[TMP1]], i1 false)
; CHECK-NEXT: br label [[LOOP_BODY:%.*]]
; CHECK: loop.body:
; CHECK-NEXT: [[STOREMERGE4:%.*]] = phi i32 [ 0, [[LOOP_PH]] ], [ [[INC:%.*]], [[LOOP_BODY]] ]
; CHECK-NEXT: [[IDXPROM1:%.*]] = sext i32 [[STOREMERGE4]] to i64
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i64, i64* [[SRC]], i64 [[IDXPROM1]]
; CHECK-NEXT: [[V:%.*]] = load i64, i64* [[ARRAYIDX1]], align 8
; CHECK-NEXT: [[IDXPROM2:%.*]] = sext i32 [[STOREMERGE4]] to i64
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i64, i64* [[DST]], i64 [[IDXPROM2]]
; CHECK-NEXT: [[INC]] = add nsw i32 [[STOREMERGE4]], 1
; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[INC]], [[SIZE]]
; CHECK-NEXT: br i1 [[CMP2]], label [[LOOP_BODY]], label [[LOOP_EXIT:%.*]]
; CHECK: loop.exit:
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  %cmp1 = icmp sgt i32 %size, 0
  br i1 %cmp1, label %loop.ph, label %exit

loop.ph:
  br label %loop.body

loop.body:
  %storemerge4 = phi i32 [ 0, %loop.ph ], [ %inc, %loop.body ]
  %idxprom1 = sext i32 %storemerge4 to i64
  %arrayidx1 = getelementptr inbounds i64, i64* %src, i64 %idxprom1
  %v = load i64, i64* %arrayidx1, align 8
  %idxprom2 = sext i32 %storemerge4 to i64
  %arrayidx2 = getelementptr inbounds i64, i64* %dst, i64 %idxprom2
  store i64 %v, i64* %arrayidx2, align 8
  %inc = add nsw i32 %storemerge4, 1
  %cmp2 = icmp slt i32 %inc, %size
  br i1 %cmp2, label %loop.body, label %loop.exit

loop.exit:
  br label %exit

exit:
  ret void
}

;; Memmove formation.
define void @PR46179_positive_stride(i8* %Src, i64 %Size) {
; CHECK-LABEL: @PR46179_positive_stride(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SRC]], i8* align 1 [[SCEVGEP]], i64 [[SIZE:%.*]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %Step = add nuw nsw i64 %indvar, 1
  %SrcI = getelementptr i8, i8* %Src, i64 %Step
  %DestI = getelementptr i8, i8* %Src, i64 %indvar
  %V = load i8, i8* %SrcI, align 1
  store i8 %V, i8* %DestI, align 1
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* noalias nocapture writeonly, i8* noalias nocapture readonly, i64, i1 immarg)

;; Memmove formation. We expect exactly the same memmove result as in the @PR46179_positive_stride output.
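;; The @PR46179_positive_stride loop above and the memcpy-based variant below
;; implement roughly this overlapping copy (illustrative C sketch; because
;; source and destination overlap by one byte, only memmove is a legal
;; replacement):
;;
;;   void pr46179(char *Src, long Size) {
;;     for (long i = 0; i < Size; ++i)
;;       Src[i] = Src[i + 1];
;;   }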
define void @loop_with_memcpy_PR46179_positive_stride(i8* %Src, i64 %Size) {
; CHECK-LABEL: @loop_with_memcpy_PR46179_positive_stride(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SRC]], i8* align 1 [[SCEVGEP]], i64 [[SIZE:%.*]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %Step = add nuw nsw i64 %indvar, 1
  %SrcI = getelementptr i8, i8* %Src, i64 %Step
  %DestI = getelementptr i8, i8* %Src, i64 %indvar
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

;; Memmove formation.
define void @PR46179_negative_stride(i8* %Src, i64 %Size) {
; CHECK-LABEL: @PR46179_negative_stride(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SCEVGEP]], i8* align 1 [[SRC]], i64 [[SIZE]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[STEP:%.*]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[STEP]] = add nsw i64 [[INDVAR]], -1
; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[STEP]]
; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  %cmp1 = icmp sgt i64 %Size, 0
  br i1 %cmp1, label %for.body, label %for.end

for.body: ; preds = %bb.nph, %.for.body
  %indvar = phi i64 [ %Step, %for.body ], [ %Size, %bb.nph ]
  %Step = add nsw i64 %indvar, -1
  %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
  %V = load i8, i8* %SrcI, align 1
  %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
  store i8 %V, i8* %DestI, align 1
  %exitcond = icmp sgt i64 %indvar, 1
  br i1 %exitcond, label %for.body, label %for.end

for.end: ; preds = %.for.body, %bb.nph
  ret void
}

;; Memmove formation. We expect exactly the same memmove result as in the @PR46179_negative_stride output.
define void @loop_with_memcpy_PR46179_negative_stride(i8* %Src, i64 %Size) {
; CHECK-LABEL: @loop_with_memcpy_PR46179_negative_stride(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
; CHECK-NEXT: br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK: for.body.preheader:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 1
; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SCEVGEP]], i8* align 1 [[SRC]], i64 [[SIZE]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[STEP:%.*]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[STEP]] = add nsw i64 [[INDVAR]], -1
; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[STEP]]
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK: for.end.loopexit:
; CHECK-NEXT: br label [[FOR_END]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  %cmp1 = icmp sgt i64 %Size, 0
  br i1 %cmp1, label %for.body, label %for.end

for.body: ; preds = %bb.nph, %.for.body
  %indvar = phi i64 [ %Step, %for.body ], [ %Size, %bb.nph ]
  %Step = add nsw i64 %indvar, -1
  %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
  %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
  %exitcond = icmp sgt i64 %indvar, 1
  br i1 %exitcond, label %for.body, label %for.end

for.end: ; preds = %.for.body, %bb.nph
  ret void
}

;; Memmove formation.
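;; The loop below issues one 16-byte memcpy per iteration, roughly
;; (illustrative C sketch; assumes <string.h> for memcpy):
;;
;;   void stride16(char *Src, long Size) {
;;     long i = 0;
;;     do {
;;       memcpy(Src + i, Src + i + 16, 16);   // merged into a single memmove
;;       i += 16;
;;     } while (i < Size);
;;   }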
define void @loop_with_memcpy_stride16(i8* %Src, i64 %Size) {
; CHECK-LABEL: @loop_with_memcpy_stride16(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 16
; CHECK-NEXT: [[SMAX:%.*]] = call i64 @llvm.smax.i64(i64 [[SIZE:%.*]], i64 16)
; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[SMAX]], -1
; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 4
; CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
; CHECK-NEXT: [[TMP3:%.*]] = add nuw i64 [[TMP2]], 16
; CHECK-NEXT: call void @llvm.memmove.p0i8.p0i8.i64(i8* align 1 [[SRC]], i8* align 1 [[SCEVGEP]], i64 [[TMP3]], i1 false)
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ [[STEP:%.*]], [[FOR_BODY]] ], [ 0, [[BB_NPH:%.*]] ]
; CHECK-NEXT: [[STEP]] = add nuw nsw i64 [[INDVAR]], 16
; CHECK-NEXT: [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[STEP]]
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp slt i64 [[STEP]], [[SIZE]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END:%.*]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  br label %for.body

for.body: ; preds = %for.body, %bb.nph
  %indvar = phi i64 [ %Step, %for.body ], [ 0, %bb.nph ]
  %Step = add nuw nsw i64 %indvar, 16
  %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
  %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 16, i1 false)
  %exitcond = icmp slt i64 %Step, %Size
  br i1 %exitcond, label %for.body, label %for.end

for.end: ; preds = %for.body
  ret void
}

;; Do not form memmove from previous load when stride is positive.
define void @do_not_form_memmove1(i8* %Src, i64 %Size) {
; CHECK-LABEL: @do_not_form_memmove1(
; CHECK-NEXT: bb.nph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVAR:%.*]] = phi i64 [ 1, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], -1
; CHECK-NEXT: [[SRCI:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 [[STEP]]
; CHECK-NEXT: [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT: [[V:%.*]] = load i8, i8* [[SRCI]], align 1
; CHECK-NEXT: store i8 [[V]], i8* [[DESTI]], align 1
; CHECK-NEXT: [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
bb.nph:
  br label %for.body

for.body: ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 1, %bb.nph ], [ %indvar.next, %for.body ]
  %Step = add nuw nsw i64 %indvar, -1
  %SrcI = getelementptr i8, i8* %Src, i64 %Step
  %DestI = getelementptr i8, i8* %Src, i64 %indvar
  %V = load i8, i8* %SrcI, align 1
  store i8 %V, i8* %DestI, align 1
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end: ; preds = %for.body, %entry
  ret void
}

;; Do not form memmove from previous load in memcpy when stride is positive.
;; Do not form memmove from the previous load in a memcpy when the stride is
;; positive.
define void @do_not_form_memmove2(i8* %Src, i64 %Size) {
; CHECK-LABEL: @do_not_form_memmove2(
; CHECK-NEXT:  bb.nph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVAR:%.*]] = phi i64 [ 1, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], -1
; CHECK-NEXT:    [[SRCI:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 [[STEP]]
; CHECK-NEXT:    [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DESTI]], i8* align 1 [[SRCI]], i64 1, i1 false)
; CHECK-NEXT:    [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
bb.nph:
  br label %for.body

for.body:                                         ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 1, %bb.nph ], [ %indvar.next, %for.body ]
  %Step = add nuw nsw i64 %indvar, -1
  %SrcI = getelementptr i8, i8* %Src, i64 %Step
  %DestI = getelementptr i8, i8* %Src, i64 %indvar
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

;; Do not form memmove from the next load when the stride is negative.
define void @do_not_form_memmove3(i8* %Src, i64 %Size) {
; CHECK-LABEL: @do_not_form_memmove3(
; CHECK-NEXT:  bb.nph:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT:    [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
; CHECK-NEXT:    [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[STEP]]
; CHECK-NEXT:    [[V:%.*]] = load i8, i8* [[SRCI]], align 1
; CHECK-NEXT:    [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT:    store i8 [[V]], i8* [[DESTI]], align 1
; CHECK-NEXT:    [[INDVAR_NEXT]] = add nsw i64 [[INDVAR]], -1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
bb.nph:
  %cmp1 = icmp sgt i64 %Size, 0
  br i1 %cmp1, label %for.body, label %for.end

for.body:                                         ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ %indvar.next, %for.body ], [ %Size, %bb.nph ]
  %Step = add nuw nsw i64 %indvar, 1
  %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
  %V = load i8, i8* %SrcI, align 1
  %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
  store i8 %V, i8* %DestI, align 1
  %indvar.next = add nsw i64 %indvar, -1
  %exitcond = icmp sgt i64 %indvar, 1
  br i1 %exitcond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %bb.nph
  ret void
}

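;; NOTE: @do_not_form_memmove3 above is the mirror image: the index counts
;; down and each iteration reads Src[i+1], the byte written by the previous
;; iteration, so the loop propagates a single byte backwards through the
;; buffer. Again, one memmove over the range would copy the original bytes
;; and change the result.
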
;; Do not form memmove from the next load in a memcpy when the stride is
;; negative.
define void @do_not_form_memmove4(i8* %Src, i64 %Size) {
; CHECK-LABEL: @do_not_form_memmove4(
; CHECK-NEXT:  bb.nph:
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sgt i64 [[SIZE:%.*]], 0
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_PREHEADER:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.preheader:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ], [ [[SIZE]], [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT:    [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
; CHECK-NEXT:    [[SRCI:%.*]] = getelementptr inbounds i8, i8* [[SRC:%.*]], i64 [[STEP]]
; CHECK-NEXT:    [[DESTI:%.*]] = getelementptr inbounds i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DESTI]], i8* align 1 [[SRCI]], i64 1, i1 false)
; CHECK-NEXT:    [[INDVAR_NEXT]] = add nsw i64 [[INDVAR]], -1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp sgt i64 [[INDVAR]], 1
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT:%.*]]
; CHECK:       for.end.loopexit:
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
bb.nph:
  %cmp1 = icmp sgt i64 %Size, 0
  br i1 %cmp1, label %for.body, label %for.end

for.body:                                         ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ %indvar.next, %for.body ], [ %Size, %bb.nph ]
  %Step = add nuw nsw i64 %indvar, 1
  %SrcI = getelementptr inbounds i8, i8* %Src, i64 %Step
  %DestI = getelementptr inbounds i8, i8* %Src, i64 %indvar
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
  %indvar.next = add nsw i64 %indvar, -1
  %exitcond = icmp sgt i64 %indvar, 1
  br i1 %exitcond, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %bb.nph
  ret void
}

;; Do not form memmove when an underaligned load overlaps the store.
define void @do_not_form_memmove5(i32* %s, i64 %size) {
; CHECK-LABEL: @do_not_form_memmove5(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[END_IDX:%.*]] = add i64 [[SIZE:%.*]], -1
; CHECK-NEXT:    [[END_PTR:%.*]] = getelementptr inbounds i32, i32* [[S:%.*]], i64 [[END_IDX]]
; CHECK-NEXT:    br label [[WHILE_BODY:%.*]]
; CHECK:       while.body:
; CHECK-NEXT:    [[PHI_PTR:%.*]] = phi i32* [ [[S]], [[ENTRY:%.*]] ], [ [[NEXT_PTR:%.*]], [[WHILE_BODY]] ]
; CHECK-NEXT:    [[NEXT:%.*]] = bitcast i32* [[PHI_PTR]] to i16*
; CHECK-NEXT:    [[SRC_PTR:%.*]] = getelementptr i16, i16* [[NEXT]], i64 1
; CHECK-NEXT:    [[SRC_PTR2:%.*]] = bitcast i16* [[SRC_PTR]] to i32*
; CHECK-NEXT:    [[VAL:%.*]] = load i32, i32* [[SRC_PTR2]], align 2
; CHECK-NEXT:    [[DST_PTR:%.*]] = getelementptr i32, i32* [[PHI_PTR]], i64 0
; CHECK-NEXT:    store i32 [[VAL]], i32* [[DST_PTR]], align 4
; CHECK-NEXT:    [[NEXT_PTR]] = getelementptr i32, i32* [[PHI_PTR]], i64 1
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* [[NEXT_PTR]], [[END_PTR]]
; CHECK-NEXT:    br i1 [[CMP]], label [[EXIT:%.*]], label [[WHILE_BODY]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
entry:
  %end.idx = add i64 %size, -1
  %end.ptr = getelementptr inbounds i32, i32* %s, i64 %end.idx
  br label %while.body

while.body:
  %phi.ptr = phi i32* [ %s, %entry ], [ %next.ptr, %while.body ]
  %next = bitcast i32* %phi.ptr to i16*
  %src.ptr = getelementptr i16, i16* %next, i64 1
  %src.ptr2 = bitcast i16* %src.ptr to i32*
  ; The underaligned load below overlaps the store in the same iteration.
  %val = load i32, i32* %src.ptr2, align 2
  %dst.ptr = getelementptr i32, i32* %phi.ptr, i64 0
  store i32 %val, i32* %dst.ptr, align 4
  %next.ptr = getelementptr i32, i32* %phi.ptr, i64 1
  %cmp = icmp eq i32* %next.ptr, %end.ptr
  br i1 %cmp, label %exit, label %while.body

exit:
  ret void
}

;; Do not form memmove for a memcpy with an aliasing store.
define void @do_not_form_memmove6(i8* %Src, i64 %Size) {
; CHECK-LABEL: @do_not_form_memmove6(
; CHECK-NEXT:  bb.nph:
; CHECK-NEXT:    [[BASEALIAS:%.*]] = call i8* @external(i8* [[SRC:%.*]])
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 1
; CHECK-NEXT:    [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
; CHECK-NEXT:    [[DESTI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[INDVAR]]
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DESTI]], i8* align 1 [[SRCI]], i64 1, i1 false)
; CHECK-NEXT:    store i8 4, i8* [[BASEALIAS]], align 1
; CHECK-NEXT:    [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE:%.*]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
bb.nph:
  %BaseAlias = call i8* @external(i8* %Src)
  br label %for.body

for.body:                                         ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %Step = add nuw nsw i64 %indvar, 1
  %SrcI = getelementptr i8, i8* %Src, i64 %Step
  %DestI = getelementptr i8, i8* %Src, i64 %indvar
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 %DestI, i8* align 1 %SrcI, i64 1, i1 false)
  store i8 4, i8* %BaseAlias
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

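;; NOTE: In @do_not_form_memmove6 above, the pointer returned by @external
;; may alias the bytes being copied, and the loop stores through it on every
;; iteration. Merging the per-iteration memcpys into one call would move the
;; copy across those potentially aliasing stores, so no memmove is formed.
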
;; Do not form memmove when the load has more than one use.
define i32 @do_not_form_memmove7(i32* %p) {
; CHECK-LABEL: @do_not_form_memmove7(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.cond.cleanup:
; CHECK-NEXT:    [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    ret i32 [[ADD_LCSSA]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 15, [[ENTRY:%.*]] ], [ [[SUB:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[SUM:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[SUB]] = add nsw i32 [[INDEX]], -1
; CHECK-NEXT:    [[TMP0:%.*]] = zext i32 [[SUB]] to i64
; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 [[TMP0]]
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[INDEX]] to i64
; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 [[IDXPROM]]
; CHECK-NEXT:    store i32 [[TMP1]], i32* [[ARRAYIDX2]], align 4
; CHECK-NEXT:    [[ADD]] = add nsw i32 [[TMP1]], [[SUM]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[INDEX]], 1
; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
;
entry:
  br label %for.body

for.cond.cleanup:                                 ; preds = %for.body
  %add.lcssa = phi i32 [ %add, %for.body ]
  ret i32 %add.lcssa

for.body:                                         ; preds = %entry, %for.body
  %index = phi i32 [ 15, %entry ], [ %sub, %for.body ]
  %sum = phi i32 [ 0, %entry ], [ %add, %for.body ]
  %sub = add nsw i32 %index, -1
  %0 = zext i32 %sub to i64
  %arrayidx = getelementptr inbounds i32, i32* %p, i64 %0
  %1 = load i32, i32* %arrayidx, align 4
  %idxprom = zext i32 %index to i64
  %arrayidx2 = getelementptr inbounds i32, i32* %p, i64 %idxprom
  store i32 %1, i32* %arrayidx2, align 4
  %add = add nsw i32 %1, %sum
  %cmp = icmp sgt i32 %index, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup
}

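;; NOTE: In @do_not_form_memmove7 above, the loaded value is not only stored
;; back into the array but also feeds the %add reduction that the function
;; returns. Because the load has that second use it cannot be folded away
;; into a memory intrinsic, so the loop is left alone.
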
; Do not form memmove when there's an aliasing operation, even
; if the memcpy source and destination are in the same object.
define void @do_not_form_memmove8(i64* %p) {
; CHECK-LABEL: @do_not_form_memmove8(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P2:%.*]] = getelementptr inbounds i64, i64* [[P:%.*]], i64 1000
; CHECK-NEXT:    br label [[LOOP:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
; CHECK:       loop:
; CHECK-NEXT:    [[X4:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[X13:%.*]], [[LOOP]] ]
; CHECK-NEXT:    [[X5:%.*]] = zext i32 [[X4]] to i64
; CHECK-NEXT:    [[X7:%.*]] = getelementptr inbounds i64, i64* [[P2]], i64 [[X5]]
; CHECK-NEXT:    [[X8:%.*]] = bitcast i64* [[X7]] to i8*
; CHECK-NEXT:    store i64 1, i64* [[X7]], align 4
; CHECK-NEXT:    [[X11:%.*]] = getelementptr inbounds i64, i64* [[P]], i64 [[X5]]
; CHECK-NEXT:    [[X12:%.*]] = bitcast i64* [[X11]] to i8*
; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* [[X12]], i8* [[X8]], i64 8, i1 false)
; CHECK-NEXT:    [[X13]] = add i32 [[X4]], 1
; CHECK-NEXT:    [[X14:%.*]] = icmp eq i32 [[X13]], 44
; CHECK-NEXT:    br i1 [[X14]], label [[EXIT:%.*]], label [[LOOP]]
;
entry:
  %p2 = getelementptr inbounds i64, i64* %p, i64 1000
  br label %loop

exit:
  ret void

loop:
  %x4 = phi i32 [ 0, %entry ], [ %x13, %loop ]
  %x5 = zext i32 %x4 to i64
  %x7 = getelementptr inbounds i64, i64* %p2, i64 %x5
  %x8 = bitcast i64* %x7 to i8*
  store i64 1, i64* %x7, align 4
  %x11 = getelementptr inbounds i64, i64* %p, i64 %x5
  %x12 = bitcast i64* %x11 to i8*
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x12, i8* %x8, i64 8, i1 false)
  %x13 = add i32 %x4, 1
  %x14 = icmp eq i32 %x13, 44
  br i1 %x14, label %exit, label %loop
}

;; Memcpy formation is still preferred over memmove.
define void @prefer_memcpy_over_memmove(i8* noalias %Src, i8* noalias %Dest, i64 %Size) {
; CHECK-LABEL: @prefer_memcpy_over_memmove(
; CHECK-NEXT:  bb.nph:
; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr i8, i8* [[SRC:%.*]], i64 42
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 1 [[DEST:%.*]], i8* align 1 [[SCEVGEP]], i64 [[SIZE:%.*]], i1 false)
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[INDVAR:%.*]] = phi i64 [ 0, [[BB_NPH:%.*]] ], [ [[INDVAR_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[STEP:%.*]] = add nuw nsw i64 [[INDVAR]], 42
; CHECK-NEXT:    [[SRCI:%.*]] = getelementptr i8, i8* [[SRC]], i64 [[STEP]]
; CHECK-NEXT:    [[DESTI:%.*]] = getelementptr i8, i8* [[DEST]], i64 [[INDVAR]]
; CHECK-NEXT:    [[V:%.*]] = load i8, i8* [[SRCI]], align 1
; CHECK-NEXT:    [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVAR_NEXT]], [[SIZE]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
bb.nph:
  br label %for.body

for.body:                                         ; preds = %bb.nph, %for.body
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.body ]
  %Step = add nuw nsw i64 %indvar, 42
  %SrcI = getelementptr i8, i8* %Src, i64 %Step
  %DestI = getelementptr i8, i8* %Dest, i64 %indvar
  %V = load i8, i8* %SrcI, align 1
  store i8 %V, i8* %DestI, align 1
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %Size
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; Validate that "memset_pattern" has the proper attributes.