; RUN: opt < %s -passes=globalopt -S -o - | FileCheck %s

%class.Class = type { i8, i8, i8, i8 }
@A = local_unnamed_addr global %class.Class undef, align 4
@B = local_unnamed_addr global %class.Class undef, align 4

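; globalopt evaluates the static constructors registered in @llvm.global_ctors and,
; when evaluation succeeds, folds the computed values into the global initializers
; and removes the constructor.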
@llvm.global_ctors = appending global [2 x { i32, void ()*, i8* }] [
  { i32, void ()*, i8* } { i32 65535, void ()* @initA, i8* null },
  { i32, void ()*, i8* } { i32 65535, void ()* @initB, i8* null }
]

define internal void @initA() section "__TEXT,__StaticInit,regular,pure_instructions" {
entry:
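  ; The i32 store writes all four i8 fields of @A in a single access.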
  store i32 -1, i32* bitcast (%class.Class* @A to i32*), align 4
  ret void
}

define internal void @initB() section "__TEXT,__StaticInit,regular,pure_instructions" {
entry:
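  ; The i8 store writes only the first i8 field of @B.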
  store i8 -1, i8* bitcast (%class.Class* @B to i8*), align 4
  ret void
}

; rdar://79503568
; Check that we don't miscompile when the store covers the whole struct.
; CHECK-NOT: @A = local_unnamed_addr global %class.Class { i8 -1, i8 undef, i8 undef, i8 undef }, align 4

; FIXME: We could optimize this to { i8 -1, i8 -1, i8 -1, i8 -1 } if constant folding were a little smarter.
; CHECK: @A = local_unnamed_addr global %class.Class undef, align 4

; Check that we still perform the transform when the store covers only the 0th element of the struct.
; CHECK: @B = local_unnamed_addr global %class.Class { i8 -1, i8 undef, i8 undef, i8 undef }, align 4

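; @initB can be evaluated completely, so it is dropped from @llvm.global_ctors and deleted;
; @initA must remain because @A was not folded.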
; CHECK: define internal void @initA()
; CHECK-NOT: define internal void @initB()