; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -O3 -S < %s | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-macosx10.6.7"

declare i8* @malloc(i64)
declare void @free(i8*)

; PR2338
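; The malloc'd byte is written but never read, and the pointer never
; escapes, so -O3 is expected to delete the entire malloc/store/free
; sequence, leaving only the `ret void` checked below.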
define void @test1() nounwind ssp {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    ret void
;
  %retval = alloca i32, align 4
  %i = alloca i8*, align 8
  %call = call i8* @malloc(i64 1)
  store i8* %call, i8** %i, align 8
  %tmp = load i8*, i8** %i, align 8
  store i8 1, i8* %tmp
  %tmp1 = load i8*, i8** %i, align 8
  call void @free(i8* %tmp1)
  ret void

}

; This function exposes a phase ordering problem when InstCombine turns
; %add into a bitmask, making it difficult to spot a 0 return value.
;
; It is also important that %add is expressed as a multiple of %div so scalar
; evolution can recognize it.
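;
; As the CHECK lines below show, the two stores survive but the reloads
; are forwarded, and %add stays in the multiple-of-%div form
; `shl nuw nsw i32 %div, 1` rather than becoming a bitmask of %a, so the
; final `%0 - 2 * %1` folds away and the function returns a constant 0.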
define i32 @test2(i32 %a, i32* %p) nounwind uwtable ssp {
; CHECK-LABEL: @test2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[DIV:%.*]] = lshr i32 [[A:%.*]], 2
; CHECK-NEXT:    store i32 [[DIV]], i32* [[P:%.*]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = shl nuw nsw i32 [[DIV]], 1
; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
; CHECK-NEXT:    store i32 [[ADD]], i32* [[ARRAYIDX1]], align 4
; CHECK-NEXT:    ret i32 0
;
entry:
  %div = udiv i32 %a, 4
  %arrayidx = getelementptr inbounds i32, i32* %p, i64 0
  store i32 %div, i32* %arrayidx, align 4
  %add = add i32 %div, %div
  %arrayidx1 = getelementptr inbounds i32, i32* %p, i64 1
  store i32 %add, i32* %arrayidx1, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %p, i64 1
  %0 = load i32, i32* %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds i32, i32* %p, i64 0
  %1 = load i32, i32* %arrayidx3, align 4
  %mul = mul i32 2, %1
  %sub = sub i32 %0, %mul
  ret i32 %sub

}