; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -memcpyopt -dse -S -verify-memoryssa | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
target triple = "i686-apple-darwin9"

%0 = type { x86_fp80, x86_fp80 }
%1 = type { i32, i32 }

@C = external constant [0 x i8]

declare void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* nocapture, i8* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.p0i8.p1i8.i64(i8* nocapture, i8 addrspace(1)* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* nocapture, i8 addrspace(1)* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i1) nounwind
declare void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i1) nounwind
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind

; Check that one of the memcpy's is removed.
;; FIXME: PR 8643 We should be able to eliminate the last memcpy here.
define void @test1(%0* sret(%0) %agg.result, x86_fp80 %z.0, x86_fp80 %z.1) nounwind {
; CHECK-LABEL: @test1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP2:%.*]] = alloca [[TMP0:%.*]], align 16
; CHECK-NEXT:    [[MEMTMP:%.*]] = alloca [[TMP0]], align 16
; CHECK-NEXT:    [[TMP5:%.*]] = fsub x86_fp80 0xK80000000000000000000, [[Z_1:%.*]]
; CHECK-NEXT:    call void @ccoshl(%0* sret([[TMP0]]) [[TMP2]], x86_fp80 [[TMP5]], x86_fp80 [[Z_0:%.*]]) #[[ATTR2:[0-9]+]]
; CHECK-NEXT:    [[TMP219:%.*]] = bitcast %0* [[TMP2]] to i8*
; CHECK-NEXT:    [[MEMTMP20:%.*]] = bitcast %0* [[MEMTMP]] to i8*
; CHECK-NEXT:    [[AGG_RESULT21:%.*]] = bitcast %0* [[AGG_RESULT:%.*]] to i8*
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[AGG_RESULT21]], i8* align 16 [[TMP219]], i32 32, i1 false)
; CHECK-NEXT:    ret void
;
entry:
  %tmp2 = alloca %0
  %memtmp = alloca %0, align 16
  %tmp5 = fsub x86_fp80 0xK80000000000000000000, %z.1
  call void @ccoshl(%0* sret(%0) %memtmp, x86_fp80 %tmp5, x86_fp80 %z.0) nounwind
  %tmp219 = bitcast %0* %tmp2 to i8*
  %memtmp20 = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %tmp219, i8* align 16 %memtmp20, i32 32, i1 false)
  %agg.result21 = bitcast %0* %agg.result to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %agg.result21, i8* align 16 %tmp219, i32 32, i1 false)
  ret void
}

declare void @ccoshl(%0* nocapture sret(%0), x86_fp80, x86_fp80) nounwind


; The intermediate alloca and one of the memcpy's should be eliminated; the
; other should be replaced with a memmove.
define void @test2(i8* %P, i8* %Q) nounwind {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    call void @llvm.memmove.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT:    ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void

}

; The intermediate alloca and one of the memcpy's should be eliminated; the
; other should be replaced with a memcpy.
define void @test2_constant(i8* %Q) nounwind {
; CHECK-LABEL: @test2_constant(
; CHECK-NEXT:    [[P:%.*]] = getelementptr inbounds [0 x i8], [0 x i8]* @C, i64 0, i64 0
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P]], i32 32, i1 false)
; CHECK-NEXT:    ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  %P = getelementptr inbounds [0 x i8], [0 x i8]* @C, i64 0, i64 0
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void

}

; The intermediate alloca and one of the memcpy's should be eliminated; the
; other should be replaced with a memcpy.
define void @test2_memcpy(i8* noalias %P, i8* noalias %Q) nounwind {
; CHECK-LABEL: @test2_memcpy(
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT:    ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void

}

; Same as @test2_memcpy, but the remaining memcpy should remain non-inline even
; if the one eliminated was inline.
define void @test3_memcpy(i8* noalias %P, i8* noalias %Q) nounwind {
; CHECK-LABEL: @test3_memcpy(
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT:    ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void

}

; Same as @test2_memcpy, but the remaining memcpy should remain inline even
; if the one eliminated was not inline.
define void @test4_memcpy(i8* noalias %P, i8* noalias %Q) nounwind {
; CHECK-LABEL: @test4_memcpy(
; CHECK-NEXT:    call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT:    ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void

}

; Same as @test2_memcpy, and the inline-ness should be preserved.
define void @test5_memcpy(i8* noalias %P, i8* noalias %Q) nounwind {
; CHECK-LABEL: @test5_memcpy(
; CHECK-NEXT:    call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 [[Q:%.*]], i8* align 16 [[P:%.*]], i32 32, i1 false)
; CHECK-NEXT:    ret void
;
  %memtmp = alloca %0, align 16
  %R = bitcast %0* %memtmp to i8*
  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 %R, i8* align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* align 16 %Q, i8* align 16 %R, i32 32, i1 false)
  ret void

}


@x = external global %0

define void @test3(%0* noalias sret(%0) %agg.result) nounwind {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[X_0:%.*]] = alloca [[TMP0:%.*]], align 16
; CHECK-NEXT:    [[X_01:%.*]] = bitcast %0* [[X_0]] to i8*
; CHECK-NEXT:    [[AGG_RESULT1:%.*]] = bitcast %0* [[AGG_RESULT:%.*]] to i8*
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 [[AGG_RESULT1]], i8* align 16 bitcast (%0* @x to i8*), i32 32, i1 false)
; CHECK-NEXT:    [[AGG_RESULT2:%.*]] = bitcast %0* [[AGG_RESULT]] to i8*
; CHECK-NEXT:    ret void
;
  %x.0 = alloca %0
  %x.01 = bitcast %0* %x.0 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %x.01, i8* align 16 bitcast (%0* @x to i8*), i32 32, i1 false)
  %agg.result2 = bitcast %0* %agg.result to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 16 %agg.result2, i8* align 16 %x.01, i32 32, i1 false)
  ret void
}


; PR8644
define void @test4(i8* %P) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    call void @test4a(i8* byval(i8) align 1 [[P:%.*]])
; CHECK-NEXT:    ret void
;
  %A = alloca %1
  %a = bitcast %1* %A to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a, i8* align 4 %P, i64 8, i1 false)
  call void @test4a(i8* align 1 byval(i8) %a)
  ret void
}

; Make sure we don't remove the memcpy if the source address space doesn't match the byval argument.
define void @test4_addrspace(i8 addrspace(1)* %P) {
; CHECK-LABEL: @test4_addrspace(
; CHECK-NEXT:    [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; CHECK-NEXT:    [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p1i8.i64(i8* align 4 [[A2]], i8 addrspace(1)* align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT:    call void @test4a(i8* byval(i8) align 1 [[A2]])
; CHECK-NEXT:    ret void
;
  %a1 = alloca %1
  %a2 = bitcast %1* %a1 to i8*
  call void @llvm.memcpy.p0i8.p1i8.i64(i8* align 4 %a2, i8 addrspace(1)* align 4 %P, i64 8, i1 false)
  call void @test4a(i8* align 1 byval(i8) %a2)
  ret void
}

define void @test4_write_between(i8* %P) {
; CHECK-LABEL: @test4_write_between(
; CHECK-NEXT:    [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; CHECK-NEXT:    [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT:    store i8 0, i8* [[A2]], align 1
; CHECK-NEXT:    call void @test4a(i8* byval(i8) align 1 [[A2]])
; CHECK-NEXT:    ret void
;
  %a1 = alloca %1
  %a2 = bitcast %1* %a1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a2, i8* align 4 %P, i64 8, i1 false)
  store i8 0, i8* %a2
  call void @test4a(i8* align 1 byval(i8) %a2)
  ret void
}

define i8 @test4_read_between(i8* %P) {
; CHECK-LABEL: @test4_read_between(
; CHECK-NEXT:    [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; CHECK-NEXT:    [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT:    [[X:%.*]] = load i8, i8* [[A2]], align 1
; CHECK-NEXT:    call void @test4a(i8* byval(i8) align 1 [[P]])
; CHECK-NEXT:    ret i8 [[X]]
;
  %a1 = alloca %1
  %a2 = bitcast %1* %a1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a2, i8* align 4 %P, i64 8, i1 false)
  %x = load i8, i8* %a2
  call void @test4a(i8* align 1 byval(i8) %a2)
  ret i8 %x
}

define void @test4_non_local(i8* %P, i1 %c) {
; CHECK-LABEL: @test4_non_local(
; CHECK-NEXT:    [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
; CHECK-NEXT:    [[A2:%.*]] = bitcast %1* [[A1]] to i8*
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[A2]], i8* align 4 [[P:%.*]], i64 8, i1 false)
; CHECK-NEXT:    br i1 [[C:%.*]], label [[CALL:%.*]], label [[EXIT:%.*]]
; CHECK:       call:
; CHECK-NEXT:    call void @test4a(i8* byval(i8) align 1 [[P]])
; CHECK-NEXT:    br label [[EXIT]]
; CHECK:       exit:
; CHECK-NEXT:    ret void
;
  %a1 = alloca %1
  %a2 = bitcast %1* %a1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %a2, i8* align 4 %P, i64 8, i1 false)
  br i1 %c, label %call, label %exit

call:
  call void @test4a(i8* align 1 byval(i8) %a2)
  br label %exit

exit:
  ret void
}

declare void @test4a(i8* align 1 byval(i8))

%struct.S = type { i128, [4 x i8] }

@sS = external global %struct.S, align 16

declare void @test5a(%struct.S* align 16 byval(%struct.S)) nounwind ssp


; rdar://8713376 - This memcpy can't be eliminated.
define i32 @test5(i32 %x) nounwind ssp {
; CHECK-LABEL: @test5(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[Y:%.*]] = alloca [[STRUCT_S:%.*]], align 16
; CHECK-NEXT:    [[TMP:%.*]] = bitcast %struct.S* [[Y]] to i8*
; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP]], i8* align 16 bitcast (%struct.S* @sS to i8*), i64 32, i1 false)
; CHECK-NEXT:    [[A:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[Y]], i64 0, i32 1, i64 0
; CHECK-NEXT:    store i8 4, i8* [[A]], align 1
; CHECK-NEXT:    call void @test5a(%struct.S* byval([[STRUCT_S]]) align 16 [[Y]])
; CHECK-NEXT:    ret i32 0
;
entry:
  %y = alloca %struct.S, align 16
  %tmp = bitcast %struct.S* %y to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 %tmp, i8* align 16 bitcast (%struct.S* @sS to i8*), i64 32, i1 false)
  %a = getelementptr %struct.S, %struct.S* %y, i64 0, i32 1, i64 0
  store i8 4, i8* %a
  call void @test5a(%struct.S* align 16 byval(%struct.S) %y)
  ret i32 0
}

;; Noop memcpy should be zapped.
define void @test6(i8* %P) {
; CHECK-LABEL: @test6(
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %P, i8* align 4 %P, i64 8, i1 false)
  ret void
}


; PR9794 - Should forward memcpy into byval argument even though the memcpy
; isn't itself 8 byte aligned.
%struct.p = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }

define i32 @test7(%struct.p* nocapture align 8 byval(%struct.p) %q) nounwind ssp {
; CHECK-LABEL: @test7(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CALL:%.*]] = call i32 @g(%struct.p* byval([[STRUCT_P:%.*]]) align 8 [[Q:%.*]]) #[[ATTR2]]
; CHECK-NEXT:    ret i32 [[CALL]]
;
entry:
  %agg.tmp = alloca %struct.p, align 4
  %tmp = bitcast %struct.p* %agg.tmp to i8*
  %tmp1 = bitcast %struct.p* %q to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %tmp, i8* align 4 %tmp1, i64 48, i1 false)
  %call = call i32 @g(%struct.p* align 8 byval(%struct.p) %agg.tmp) nounwind
  ret i32 %call
}

declare i32 @g(%struct.p* align 8 byval(%struct.p))


; PR11142 - When looking for a memcpy-memcpy dependency, don't get stuck on
; instructions between the memcpy's that only affect the destination pointer.
@test8.str = internal constant [7 x i8] c"ABCDEF\00"

define void @test8() {
; CHECK-LABEL: @test8(
; CHECK-NEXT:    ret void
;
  %A = tail call i8* @malloc(i32 10)
  %B = getelementptr inbounds i8, i8* %A, i64 2
  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %B, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @test8.str, i64 0, i64 0), i32 7, i1 false)
  %C = tail call i8* @malloc(i32 10)
  %D = getelementptr inbounds i8, i8* %C, i64 2
  tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* %D, i8* %B, i32 7, i1 false)
  ret void
}

declare noalias i8* @malloc(i32) willreturn allockind("alloc,uninitialized") allocsize(0)

; rdar://11341081
%struct.big = type { [50 x i32] }

define void @test9_addrspacecast() nounwind ssp uwtable {
; CHECK-LABEL: @test9_addrspacecast(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_BIG:%.*]], align 4
; CHECK-NEXT:    [[TMP:%.*]] = alloca [[STRUCT_BIG]], align 4
; CHECK-NEXT:    call void @f1(%struct.big* sret([[STRUCT_BIG]]) [[B]])
; CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast %struct.big* [[B]] to i8 addrspace(1)*
; CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast %struct.big* [[TMP]] to i8 addrspace(1)*
; CHECK-NEXT:    call void @f2(%struct.big* [[B]])
; CHECK-NEXT:    ret void
;
entry:
  %b = alloca %struct.big, align 4
  %tmp = alloca %struct.big, align 4
  call void @f1(%struct.big* sret(%struct.big) %tmp)
  %0 = addrspacecast %struct.big* %b to i8 addrspace(1)*
  %1 = addrspacecast %struct.big* %tmp to i8 addrspace(1)*
  call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* align 4 %0, i8 addrspace(1)* align 4 %1, i64 200, i1 false)
  call void @f2(%struct.big* %b)
  ret void
}

define void @test9() nounwind ssp uwtable {
; CHECK-LABEL: @test9(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[B:%.*]] = alloca [[STRUCT_BIG:%.*]], align 4
; CHECK-NEXT:    [[TMP:%.*]] = alloca [[STRUCT_BIG]], align 4
; CHECK-NEXT:    call void @f1(%struct.big* sret([[STRUCT_BIG]]) [[B]])
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast %struct.big* [[B]] to i8*
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast %struct.big* [[TMP]] to i8*
; CHECK-NEXT:    call void @f2(%struct.big* [[B]])
; CHECK-NEXT:    ret void
;
entry:
  %b = alloca %struct.big, align 4
  %tmp = alloca %struct.big, align 4
  call void @f1(%struct.big* sret(%struct.big) %tmp)
  %0 = bitcast %struct.big* %b to i8*
  %1 = bitcast %struct.big* %tmp to i8*
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 200, i1 false)
  call void @f2(%struct.big* %b)
  ret void
}

; rdar://14073661.
; Test10 triggered an assertion when the compiler tried to get the size of the
; opaque type of *x, where x is the formal argument with the 'sret' attribute.

%opaque = type opaque
declare void @foo(i32* noalias nocapture)

define void @test10(%opaque* noalias nocapture sret(%opaque) %x, i32 %y) {
; CHECK-LABEL: @test10(
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[Y:%.*]], i32* [[A]], align 4
; CHECK-NEXT:    call void @foo(i32* noalias nocapture [[A]])
; CHECK-NEXT:    [[C:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT:    [[D:%.*]] = bitcast %opaque* [[X:%.*]] to i32*
; CHECK-NEXT:    store i32 [[C]], i32* [[D]], align 4
; CHECK-NEXT:    ret void
;
  %a = alloca i32, align 4
  store i32 %y, i32* %a
  call void @foo(i32* noalias nocapture %a)
  %c = load i32, i32* %a
  %d = bitcast %opaque* %x to i32*
  store i32 %c, i32* %d
  ret void
}

; Don't create new addrspacecasts when we don't know they're safe for the target.
define void @test11([20 x i32] addrspace(1)* nocapture dereferenceable(80) %P) {
; CHECK-LABEL: @test11(
; CHECK-NEXT:    [[B:%.*]] = bitcast [20 x i32] addrspace(1)* [[P:%.*]] to i8 addrspace(1)*
; CHECK-NEXT:    call void @llvm.memset.p1i8.i64(i8 addrspace(1)* align 4 [[B]], i8 0, i64 80, i1 false)
; CHECK-NEXT:    ret void
;
  %A = alloca [20 x i32], align 4
  %a = bitcast [20 x i32]* %A to i8*
  %b = bitcast [20 x i32] addrspace(1)* %P to i8 addrspace(1)*
  call void @llvm.memset.p0i8.i64(i8* align 4 %a, i8 0, i64 80, i1 false)
  call void @llvm.memcpy.p1i8.p0i8.i64(i8 addrspace(1)* align 4 %b, i8* align 4 %a, i64 80, i1 false)
  ret void
}

declare void @f1(%struct.big* nocapture sret(%struct.big))
declare void @f2(%struct.big*)