; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -slp-vectorizer -mattr=+sse < %s | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt -S -slp-vectorizer -mattr=+avx512f < %s | FileCheck %s --check-prefixes=CHECK,AVX512

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; Function Attrs: norecurse nounwind readnone uwtable
define zeroext i8 @foo(i32 %x, i32 %y, i32 %a, i32 %b) local_unnamed_addr #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[B_A:%.*]] = select i1 [[CMP]], i32 [[B:%.*]], i32 [[A:%.*]]
; CHECK-NEXT:    [[RETVAL_0:%.*]] = trunc i32 [[B_A]] to i8
; CHECK-NEXT:    ret i8 [[RETVAL_0]]
;
entry:
  %cmp = icmp slt i32 %x, %y
  %b.a = select i1 %cmp, i32 %b, i32 %a
  %retval.0 = trunc i32 %b.a to i8
  ret i8 %retval.0
}
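; @bar applies the same compare/select/zext/mul/trunc pattern as @foo to 16
; consecutive i8 elements per loop iteration, so the SLP vectorizer is
; expected to rewrite the body as <16 x i8> loads, a <16 x i8> select, a
; multiply widened to <16 x i32> against a broadcast of %w, and a single
; <16 x i8> store.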
define void @bar(i8* noalias nocapture readonly %a, i8* noalias nocapture readonly %b, i8* noalias nocapture readonly %c, i8* noalias nocapture readonly %d, i8* noalias nocapture %e, i32 %w) local_unnamed_addr #1 {
; CHECK-LABEL: @bar(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <16 x i32> poison, i32 [[W:%.*]], i32 0
; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <16 x i32> [[TMP0]], <16 x i32> poison, <16 x i32> zeroinitializer
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_0356:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[A_ADDR_0355:%.*]] = phi i8* [ [[A:%.*]], [[ENTRY]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[E_ADDR_0354:%.*]] = phi i8* [ [[E:%.*]], [[ENTRY]] ], [ [[ADD_PTR192:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[D_ADDR_0353:%.*]] = phi i8* [ [[D:%.*]], [[ENTRY]] ], [ [[ADD_PTR191:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[C_ADDR_0352:%.*]] = phi i8* [ [[C:%.*]], [[ENTRY]] ], [ [[ADD_PTR190:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[B_ADDR_0351:%.*]] = phi i8* [ [[B:%.*]], [[ENTRY]] ], [ [[ADD_PTR189:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[C_ADDR_0352]] to <16 x i8>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, <16 x i8>* [[TMP1]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[D_ADDR_0353]] to <16 x i8>*
; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, <16 x i8>* [[TMP3]], align 1
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast i8* [[A_ADDR_0355]] to <16 x i8>*
; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i8>, <16 x i8>* [[TMP5]], align 1
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i8* [[B_ADDR_0351]] to <16 x i8>*
; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i8>, <16 x i8>* [[TMP7]], align 1
; CHECK-NEXT:    [[TMP9:%.*]] = icmp ult <16 x i8> [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[TMP10:%.*]] = select <16 x i1> [[TMP9]], <16 x i8> [[TMP8]], <16 x i8> [[TMP6]]
; CHECK-NEXT:    [[TMP11:%.*]] = zext <16 x i8> [[TMP10]] to <16 x i32>
; CHECK-NEXT:    [[TMP12:%.*]] = mul <16 x i32> [[TMP11]], [[SHUFFLE]]
; CHECK-NEXT:    [[TMP13:%.*]] = trunc <16 x i32> [[TMP12]] to <16 x i8>
; CHECK-NEXT:    [[TMP14:%.*]] = bitcast i8* [[E_ADDR_0354]] to <16 x i8>*
; CHECK-NEXT:    store <16 x i8> [[TMP13]], <16 x i8>* [[TMP14]], align 1
; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[I_0356]], 1
; CHECK-NEXT:    [[ADD_PTR]] = getelementptr inbounds i8, i8* [[A_ADDR_0355]], i64 16
; CHECK-NEXT:    [[ADD_PTR189]] = getelementptr inbounds i8, i8* [[B_ADDR_0351]], i64 16
; CHECK-NEXT:    [[ADD_PTR190]] = getelementptr inbounds i8, i8* [[C_ADDR_0352]], i64 16
; CHECK-NEXT:    [[ADD_PTR191]] = getelementptr inbounds i8, i8* [[D_ADDR_0353]], i64 16
; CHECK-NEXT:    [[ADD_PTR192]] = getelementptr inbounds i8, i8* [[E_ADDR_0354]], i64 16
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 8
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.0356 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %a.addr.0355 = phi i8* [ %a, %entry ], [ %add.ptr, %for.body ]
  %e.addr.0354 = phi i8* [ %e, %entry ], [ %add.ptr192, %for.body ]
  %d.addr.0353 = phi i8* [ %d, %entry ], [ %add.ptr191, %for.body ]
  %c.addr.0352 = phi i8* [ %c, %entry ], [ %add.ptr190, %for.body ]
  %b.addr.0351 = phi i8* [ %b, %entry ], [ %add.ptr189, %for.body ]
  %0 = load i8, i8* %c.addr.0352, align 1
  %1 = load i8, i8* %d.addr.0353, align 1
  %2 = load i8, i8* %a.addr.0355, align 1
  %3 = load i8, i8* %b.addr.0351, align 1
  %cmp.i = icmp ult i8 %0, %1
  %b.a.i.v.v = select i1 %cmp.i, i8 %3, i8 %2
  %b.a.i.v = zext i8 %b.a.i.v.v to i32
  %b.a.i = mul i32 %b.a.i.v, %w
  %retval.0.i = trunc i32 %b.a.i to i8
  store i8 %retval.0.i, i8* %e.addr.0354, align 1
  %arrayidx9 = getelementptr inbounds i8, i8* %c.addr.0352, i64 1
  %4 = load i8, i8* %arrayidx9, align 1
  %arrayidx11 = getelementptr inbounds i8, i8* %d.addr.0353, i64 1
  %5 = load i8, i8* %arrayidx11, align 1
  %arrayidx13 = getelementptr inbounds i8, i8* %a.addr.0355, i64 1
  %6 = load i8, i8* %arrayidx13, align 1
  %arrayidx16 = getelementptr inbounds i8, i8* %b.addr.0351, i64 1
  %7 = load i8, i8* %arrayidx16, align 1
  %cmp.i348 = icmp ult i8 %4, %5
  %b.a.i349.v.v = select i1 %cmp.i348, i8 %7, i8 %6
  %b.a.i349.v = zext i8 %b.a.i349.v.v to i32
  %b.a.i349 = mul i32 %b.a.i349.v, %w
  %retval.0.i350 = trunc i32 %b.a.i349 to i8
  %arrayidx20 = getelementptr inbounds i8, i8* %e.addr.0354, i64 1
  store i8 %retval.0.i350, i8* %arrayidx20, align 1
  %arrayidx21 = getelementptr inbounds i8, i8* %c.addr.0352, i64 2
  %8 = load i8, i8* %arrayidx21, align 1
  %arrayidx23 = getelementptr inbounds i8, i8* %d.addr.0353, i64 2
  %9 = load i8, i8* %arrayidx23, align 1
  %arrayidx25 = getelementptr inbounds i8, i8* %a.addr.0355, i64 2
  %10 = load i8, i8* %arrayidx25, align 1
  %arrayidx28 = getelementptr inbounds i8, i8* %b.addr.0351, i64 2
  %11 = load i8, i8* %arrayidx28, align 1
  %cmp.i345 = icmp ult i8 %8, %9
  %b.a.i346.v.v = select i1 %cmp.i345, i8 %11, i8 %10
  %b.a.i346.v = zext i8 %b.a.i346.v.v to i32
  %b.a.i346 = mul i32 %b.a.i346.v, %w
  %retval.0.i347 = trunc i32 %b.a.i346 to i8
  %arrayidx32 = getelementptr inbounds i8, i8* %e.addr.0354, i64 2
  store i8 %retval.0.i347, i8* %arrayidx32, align 1
  %arrayidx33 = getelementptr inbounds i8, i8* %c.addr.0352, i64 3
  %12 = load i8, i8* %arrayidx33, align 1
  %arrayidx35 = getelementptr inbounds i8, i8* %d.addr.0353, i64 3
  %13 = load i8, i8* %arrayidx35, align 1
  %arrayidx37 = getelementptr inbounds i8, i8* %a.addr.0355, i64 3
  %14 = load i8, i8* %arrayidx37, align 1
  %arrayidx40 = getelementptr inbounds i8, i8* %b.addr.0351, i64 3
  %15 = load i8, i8* %arrayidx40, align 1
  %cmp.i342 = icmp ult i8 %12, %13
  %b.a.i343.v.v = select i1 %cmp.i342, i8 %15, i8 %14
  %b.a.i343.v = zext i8 %b.a.i343.v.v to i32
  %b.a.i343 = mul i32 %b.a.i343.v, %w
  %retval.0.i344 = trunc i32 %b.a.i343 to i8
  %arrayidx44 = getelementptr inbounds i8, i8* %e.addr.0354, i64 3
  store i8 %retval.0.i344, i8* %arrayidx44, align 1
  %arrayidx45 = getelementptr inbounds i8, i8* %c.addr.0352, i64 4
  %16 = load i8, i8* %arrayidx45, align 1
  %arrayidx47 = getelementptr inbounds i8, i8* %d.addr.0353, i64 4
  %17 = load i8, i8* %arrayidx47, align 1
  %arrayidx49 = getelementptr inbounds i8, i8* %a.addr.0355, i64 4
  %18 = load i8, i8* %arrayidx49, align 1
  %arrayidx52 = getelementptr inbounds i8, i8* %b.addr.0351, i64 4
  %19 = load i8, i8* %arrayidx52, align 1
  %cmp.i339 = icmp ult i8 %16, %17
  %b.a.i340.v.v = select i1 %cmp.i339, i8 %19, i8 %18
  %b.a.i340.v = zext i8 %b.a.i340.v.v to i32
  %b.a.i340 = mul i32 %b.a.i340.v, %w
  %retval.0.i341 = trunc i32 %b.a.i340 to i8
  %arrayidx56 = getelementptr inbounds i8, i8* %e.addr.0354, i64 4
  store i8 %retval.0.i341, i8* %arrayidx56, align 1
  %arrayidx57 = getelementptr inbounds i8, i8* %c.addr.0352, i64 5
  %20 = load i8, i8* %arrayidx57, align 1
  %arrayidx59 = getelementptr inbounds i8, i8* %d.addr.0353, i64 5
  %21 = load i8, i8* %arrayidx59, align 1
  %arrayidx61 = getelementptr inbounds i8, i8* %a.addr.0355, i64 5
  %22 = load i8, i8* %arrayidx61, align 1
  %arrayidx64 = getelementptr inbounds i8, i8* %b.addr.0351, i64 5
  %23 = load i8, i8* %arrayidx64, align 1
  %cmp.i336 = icmp ult i8 %20, %21
  %b.a.i337.v.v = select i1 %cmp.i336, i8 %23, i8 %22
  %b.a.i337.v = zext i8 %b.a.i337.v.v to i32
  %b.a.i337 = mul i32 %b.a.i337.v, %w
  %retval.0.i338 = trunc i32 %b.a.i337 to i8
  %arrayidx68 = getelementptr inbounds i8, i8* %e.addr.0354, i64 5
  store i8 %retval.0.i338, i8* %arrayidx68, align 1
  %arrayidx69 = getelementptr inbounds i8, i8* %c.addr.0352, i64 6
  %24 = load i8, i8* %arrayidx69, align 1
  %arrayidx71 = getelementptr inbounds i8, i8* %d.addr.0353, i64 6
  %25 = load i8, i8* %arrayidx71, align 1
  %arrayidx73 = getelementptr inbounds i8, i8* %a.addr.0355, i64 6
  %26 = load i8, i8* %arrayidx73, align 1
  %arrayidx76 = getelementptr inbounds i8, i8* %b.addr.0351, i64 6
  %27 = load i8, i8* %arrayidx76, align 1
  %cmp.i333 = icmp ult i8 %24, %25
  %b.a.i334.v.v = select i1 %cmp.i333, i8 %27, i8 %26
  %b.a.i334.v = zext i8 %b.a.i334.v.v to i32
  %b.a.i334 = mul i32 %b.a.i334.v, %w
  %retval.0.i335 = trunc i32 %b.a.i334 to i8
  %arrayidx80 = getelementptr inbounds i8, i8* %e.addr.0354, i64 6
  store i8 %retval.0.i335, i8* %arrayidx80, align 1
  %arrayidx81 = getelementptr inbounds i8, i8* %c.addr.0352, i64 7
  %28 = load i8, i8* %arrayidx81, align 1
  %arrayidx83 = getelementptr inbounds i8, i8* %d.addr.0353, i64 7
  %29 = load i8, i8* %arrayidx83, align 1
  %arrayidx85 = getelementptr inbounds i8, i8* %a.addr.0355, i64 7
  %30 = load i8, i8* %arrayidx85, align 1
  %arrayidx88 = getelementptr inbounds i8, i8* %b.addr.0351, i64 7
  %31 = load i8, i8* %arrayidx88, align 1
  %cmp.i330 = icmp ult i8 %28, %29
  %b.a.i331.v.v = select i1 %cmp.i330, i8 %31, i8 %30
  %b.a.i331.v = zext i8 %b.a.i331.v.v to i32
  %b.a.i331 = mul i32 %b.a.i331.v, %w
  %retval.0.i332 = trunc i32 %b.a.i331 to i8
  %arrayidx92 = getelementptr inbounds i8, i8* %e.addr.0354, i64 7
  store i8 %retval.0.i332, i8* %arrayidx92, align 1
  %arrayidx93 = getelementptr inbounds i8, i8* %c.addr.0352, i64 8
  %32 = load i8, i8* %arrayidx93, align 1
  %arrayidx95 = getelementptr inbounds i8, i8* %d.addr.0353, i64 8
  %33 = load i8, i8* %arrayidx95, align 1
  %arrayidx97 = getelementptr inbounds i8, i8* %a.addr.0355, i64 8
  %34 = load i8, i8* %arrayidx97, align 1
  %arrayidx100 = getelementptr inbounds i8, i8* %b.addr.0351, i64 8
  %35 = load i8, i8* %arrayidx100, align 1
  %cmp.i327 = icmp ult i8 %32, %33
  %b.a.i328.v.v = select i1 %cmp.i327, i8 %35, i8 %34
  %b.a.i328.v = zext i8 %b.a.i328.v.v to i32
  %b.a.i328 = mul i32 %b.a.i328.v, %w
  %retval.0.i329 = trunc i32 %b.a.i328 to i8
  %arrayidx104 = getelementptr inbounds i8, i8* %e.addr.0354, i64 8
  store i8 %retval.0.i329, i8* %arrayidx104, align 1
  %arrayidx105 = getelementptr inbounds i8, i8* %c.addr.0352, i64 9
  %36 = load i8, i8* %arrayidx105, align 1
  %arrayidx107 = getelementptr inbounds i8, i8* %d.addr.0353, i64 9
  %37 = load i8, i8* %arrayidx107, align 1
  %arrayidx109 = getelementptr inbounds i8, i8* %a.addr.0355, i64 9
  %38 = load i8, i8* %arrayidx109, align 1
  %arrayidx112 = getelementptr inbounds i8, i8* %b.addr.0351, i64 9
  %39 = load i8, i8* %arrayidx112, align 1
  %cmp.i324 = icmp ult i8 %36, %37
  %b.a.i325.v.v = select i1 %cmp.i324, i8 %39, i8 %38
  %b.a.i325.v = zext i8 %b.a.i325.v.v to i32
  %b.a.i325 = mul i32 %b.a.i325.v, %w
  %retval.0.i326 = trunc i32 %b.a.i325 to i8
  %arrayidx116 = getelementptr inbounds i8, i8* %e.addr.0354, i64 9
  store i8 %retval.0.i326, i8* %arrayidx116, align 1
  %arrayidx117 = getelementptr inbounds i8, i8* %c.addr.0352, i64 10
  %40 = load i8, i8* %arrayidx117, align 1
  %arrayidx119 = getelementptr inbounds i8, i8* %d.addr.0353, i64 10
  %41 = load i8, i8* %arrayidx119, align 1
  %arrayidx121 = getelementptr inbounds i8, i8* %a.addr.0355, i64 10
  %42 = load i8, i8* %arrayidx121, align 1
  %arrayidx124 = getelementptr inbounds i8, i8* %b.addr.0351, i64 10
  %43 = load i8, i8* %arrayidx124, align 1
  %cmp.i321 = icmp ult i8 %40, %41
  %b.a.i322.v.v = select i1 %cmp.i321, i8 %43, i8 %42
  %b.a.i322.v = zext i8 %b.a.i322.v.v to i32
  %b.a.i322 = mul i32 %b.a.i322.v, %w
  %retval.0.i323 = trunc i32 %b.a.i322 to i8
  %arrayidx128 = getelementptr inbounds i8, i8* %e.addr.0354, i64 10
  store i8 %retval.0.i323, i8* %arrayidx128, align 1
  %arrayidx129 = getelementptr inbounds i8, i8* %c.addr.0352, i64 11
  %44 = load i8, i8* %arrayidx129, align 1
  %arrayidx131 = getelementptr inbounds i8, i8* %d.addr.0353, i64 11
  %45 = load i8, i8* %arrayidx131, align 1
  %arrayidx133 = getelementptr inbounds i8, i8* %a.addr.0355, i64 11
  %46 = load i8, i8* %arrayidx133, align 1
  %arrayidx136 = getelementptr inbounds i8, i8* %b.addr.0351, i64 11
  %47 = load i8, i8* %arrayidx136, align 1
  %cmp.i318 = icmp ult i8 %44, %45
  %b.a.i319.v.v = select i1 %cmp.i318, i8 %47, i8 %46
  %b.a.i319.v = zext i8 %b.a.i319.v.v to i32
  %b.a.i319 = mul i32 %b.a.i319.v, %w
  %retval.0.i320 = trunc i32 %b.a.i319 to i8
  %arrayidx140 = getelementptr inbounds i8, i8* %e.addr.0354, i64 11
  store i8 %retval.0.i320, i8* %arrayidx140, align 1
  %arrayidx141 = getelementptr inbounds i8, i8* %c.addr.0352, i64 12
  %48 = load i8, i8* %arrayidx141, align 1
  %arrayidx143 = getelementptr inbounds i8, i8* %d.addr.0353, i64 12
  %49 = load i8, i8* %arrayidx143, align 1
  %arrayidx145 = getelementptr inbounds i8, i8* %a.addr.0355, i64 12
  %50 = load i8, i8* %arrayidx145, align 1
  %arrayidx148 = getelementptr inbounds i8, i8* %b.addr.0351, i64 12
  %51 = load i8, i8* %arrayidx148, align 1
  %cmp.i315 = icmp ult i8 %48, %49
  %b.a.i316.v.v = select i1 %cmp.i315, i8 %51, i8 %50
  %b.a.i316.v = zext i8 %b.a.i316.v.v to i32
  %b.a.i316 = mul i32 %b.a.i316.v, %w
  %retval.0.i317 = trunc i32 %b.a.i316 to i8
  %arrayidx152 = getelementptr inbounds i8, i8* %e.addr.0354, i64 12
  store i8 %retval.0.i317, i8* %arrayidx152, align 1
  %arrayidx153 = getelementptr inbounds i8, i8* %c.addr.0352, i64 13
  %52 = load i8, i8* %arrayidx153, align 1
  %arrayidx155 = getelementptr inbounds i8, i8* %d.addr.0353, i64 13
  %53 = load i8, i8* %arrayidx155, align 1
  %arrayidx157 = getelementptr inbounds i8, i8* %a.addr.0355, i64 13
  %54 = load i8, i8* %arrayidx157, align 1
  %arrayidx160 = getelementptr inbounds i8, i8* %b.addr.0351, i64 13
  %55 = load i8, i8* %arrayidx160, align 1
  %cmp.i312 = icmp ult i8 %52, %53
  %b.a.i313.v.v = select i1 %cmp.i312, i8 %55, i8 %54
  %b.a.i313.v = zext i8 %b.a.i313.v.v to i32
  %b.a.i313 = mul i32 %b.a.i313.v, %w
  %retval.0.i314 = trunc i32 %b.a.i313 to i8
  %arrayidx164 = getelementptr inbounds i8, i8* %e.addr.0354, i64 13
  store i8 %retval.0.i314, i8* %arrayidx164, align 1
  %arrayidx165 = getelementptr inbounds i8, i8* %c.addr.0352, i64 14
  %56 = load i8, i8* %arrayidx165, align 1
  %arrayidx167 = getelementptr inbounds i8, i8* %d.addr.0353, i64 14
  %57 = load i8, i8* %arrayidx167, align 1
  %arrayidx169 = getelementptr inbounds i8, i8* %a.addr.0355, i64 14
  %58 = load i8, i8* %arrayidx169, align 1
  %arrayidx172 = getelementptr inbounds i8, i8* %b.addr.0351, i64 14
  %59 = load i8, i8* %arrayidx172, align 1
  %cmp.i309 = icmp ult i8 %56, %57
  %b.a.i310.v.v = select i1 %cmp.i309, i8 %59, i8 %58
  %b.a.i310.v = zext i8 %b.a.i310.v.v to i32
  %b.a.i310 = mul i32 %b.a.i310.v, %w
  %retval.0.i311 = trunc i32 %b.a.i310 to i8
  %arrayidx176 = getelementptr inbounds i8, i8* %e.addr.0354, i64 14
  store i8 %retval.0.i311, i8* %arrayidx176, align 1
  %arrayidx177 = getelementptr inbounds i8, i8* %c.addr.0352, i64 15
  %60 = load i8, i8* %arrayidx177, align 1
  %arrayidx179 = getelementptr inbounds i8, i8* %d.addr.0353, i64 15
  %61 = load i8, i8* %arrayidx179, align 1
  %arrayidx181 = getelementptr inbounds i8, i8* %a.addr.0355, i64 15
  %62 = load i8, i8* %arrayidx181, align 1
  %arrayidx184 = getelementptr inbounds i8, i8* %b.addr.0351, i64 15
  %63 = load i8, i8* %arrayidx184, align 1
  %cmp.i306 = icmp ult i8 %60, %61
  %b.a.i307.v.v = select i1 %cmp.i306, i8 %63, i8 %62
  %b.a.i307.v = zext i8 %b.a.i307.v.v to i32
  %b.a.i307 = mul i32 %b.a.i307.v, %w
  %retval.0.i308 = trunc i32 %b.a.i307 to i8
  %arrayidx188 = getelementptr inbounds i8, i8* %e.addr.0354, i64 15
  store i8 %retval.0.i308, i8* %arrayidx188, align 1
  %inc = add nuw nsw i32 %i.0356, 1
  %add.ptr = getelementptr inbounds i8, i8* %a.addr.0355, i64 16
  %add.ptr189 = getelementptr inbounds i8, i8* %b.addr.0351, i64 16
  %add.ptr190 = getelementptr inbounds i8, i8* %c.addr.0352, i64 16
  %add.ptr191 = getelementptr inbounds i8, i8* %d.addr.0353, i64 16
  %add.ptr192 = getelementptr inbounds i8, i8* %e.addr.0354, i64 16
  %exitcond = icmp eq i32 %inc, 8
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}
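; @foo1 negates all 64 elements of @ib into @ia with fully unrolled scalar
; code, then re-checks the result in a scalar loop that calls @abort on a
; mismatch. The unrolled negation should be vectorized at the widest legal
; width: <4 x i32> chunks under +sse and <16 x i32> chunks under +avx512f.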
@ib = local_unnamed_addr global [64 x i32] [i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0], align 16
@ia = common local_unnamed_addr global [64 x i32] zeroinitializer, align 16

define i32 @foo1() local_unnamed_addr #0 {
; SSE-LABEL: @foo1(
; SSE-NEXT:  entry:
; SSE-NEXT:    [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([64 x i32]* @ib to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP1:%.*]] = xor <4 x i32> [[TMP0]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP1]], <4 x i32>* bitcast ([64 x i32]* @ia to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 4) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP3:%.*]] = xor <4 x i32> [[TMP2]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP3]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 4) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP4:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 8) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP5:%.*]] = xor <4 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP5]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 8) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP6:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 12) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP7:%.*]] = xor <4 x i32> [[TMP6]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 12) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP8:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 16) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP9:%.*]] = xor <4 x i32> [[TMP8]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP9]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 16) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP10:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 20) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP11:%.*]] = xor <4 x i32> [[TMP10]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP11]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 20) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP12:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 24) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP13:%.*]] = xor <4 x i32> [[TMP12]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP13]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 24) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP14:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 28) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP15:%.*]] = xor <4 x i32> [[TMP14]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP15]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 28) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP16:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 32) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP17:%.*]] = xor <4 x i32> [[TMP16]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP17]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 32) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP18:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 36) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP19:%.*]] = xor <4 x i32> [[TMP18]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP19]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 36) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP20:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 40) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP21:%.*]] = xor <4 x i32> [[TMP20]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP21]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 40) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP22:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 44) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP23:%.*]] = xor <4 x i32> [[TMP22]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP23]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 44) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP24:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 48) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP25:%.*]] = xor <4 x i32> [[TMP24]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP25]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 48) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP26:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 52) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP27:%.*]] = xor <4 x i32> [[TMP26]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP27]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 52) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP28:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 56) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP29:%.*]] = xor <4 x i32> [[TMP28]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP29]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 56) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP30:%.*]] = load <4 x i32>, <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 60) to <4 x i32>*), align 16
; SSE-NEXT:    [[TMP31:%.*]] = xor <4 x i32> [[TMP30]], <i32 -1, i32 -1, i32 -1, i32 -1>
; SSE-NEXT:    store <4 x i32> [[TMP31]], <4 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 60) to <4 x i32>*), align 16
; SSE-NEXT:    br label [[FOR_BODY5:%.*]]
; SSE:       for.cond3:
; SSE-NEXT:    [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV:%.*]], 1
; SSE-NEXT:    [[CMP4:%.*]] = icmp ult i64 [[INDVARS_IV]], 63
; SSE-NEXT:    br i1 [[CMP4]], label [[FOR_BODY5]], label [[FOR_END14:%.*]]
; SSE:       for.body5:
; SSE-NEXT:    [[INDVARS_IV]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT]], [[FOR_COND3:%.*]] ]
; SSE-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [64 x i32], [64 x i32]* @ia, i64 0, i64 [[INDVARS_IV]]
; SSE-NEXT:    [[TMP32:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4
; SSE-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [64 x i32], [64 x i32]* @ib, i64 0, i64 [[INDVARS_IV]]
; SSE-NEXT:    [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4
; SSE-NEXT:    [[NEG10:%.*]] = xor i32 [[TMP33]], -1
; SSE-NEXT:    [[CMP11:%.*]] = icmp eq i32 [[TMP32]], [[NEG10]]
; SSE-NEXT:    br i1 [[CMP11]], label [[FOR_COND3]], label [[IF_THEN:%.*]]
; SSE:       if.then:
; SSE-NEXT:    tail call void @abort()
; SSE-NEXT:    unreachable
; SSE:       for.end14:
; SSE-NEXT:    ret i32 0
;
; AVX512-LABEL: @foo1(
; AVX512-NEXT:  entry:
; AVX512-NEXT:    [[TMP0:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([64 x i32]* @ib to <16 x i32>*), align 16
; AVX512-NEXT:    [[TMP1:%.*]] = xor <16 x i32> [[TMP0]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
; AVX512-NEXT:    store <16 x i32> [[TMP1]], <16 x i32>* bitcast ([64 x i32]* @ia to <16 x i32>*), align 16
; AVX512-NEXT:    [[TMP2:%.*]] = load <16 x i32>, <16 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 16) to <16 x i32>*), align 16
; AVX512-NEXT:    [[TMP3:%.*]] = xor <16 x i32> [[TMP2]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
; AVX512-NEXT:    store <16 x i32> [[TMP3]], <16 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 16) to <16 x i32>*), align 16
; AVX512-NEXT:    [[TMP4:%.*]] = load <16 x i32>, <16 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 32) to <16 x i32>*), align 16
; AVX512-NEXT:    [[TMP5:%.*]] = xor <16 x i32> [[TMP4]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
; AVX512-NEXT:    store <16 x i32> [[TMP5]], <16 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 32) to <16 x i32>*), align 16
; AVX512-NEXT:    [[TMP6:%.*]] = load <16 x i32>, <16 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 48) to <16 x i32>*), align 16
; AVX512-NEXT:    [[TMP7:%.*]] = xor <16 x i32> [[TMP6]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
; AVX512-NEXT:    store <16 x i32> [[TMP7]], <16 x i32>* bitcast (i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 48) to <16 x i32>*), align 16
; AVX512-NEXT:    br label [[FOR_BODY5:%.*]]
; AVX512:       for.cond3:
; AVX512-NEXT:    [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV:%.*]], 1
; AVX512-NEXT:    [[CMP4:%.*]] = icmp ult i64 [[INDVARS_IV]], 63
; AVX512-NEXT:    br i1 [[CMP4]], label [[FOR_BODY5]], label [[FOR_END14:%.*]]
; AVX512:       for.body5:
; AVX512-NEXT:    [[INDVARS_IV]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT]], [[FOR_COND3:%.*]] ]
; AVX512-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds [64 x i32], [64 x i32]* @ia, i64 0, i64 [[INDVARS_IV]]
; AVX512-NEXT:    [[TMP8:%.*]] = load i32, i32* [[ARRAYIDX7]], align 4
; AVX512-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds [64 x i32], [64 x i32]* @ib, i64 0, i64 [[INDVARS_IV]]
; AVX512-NEXT:    [[TMP9:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4
; AVX512-NEXT:    [[NEG10:%.*]] = xor i32 [[TMP9]], -1
; AVX512-NEXT:    [[CMP11:%.*]] = icmp eq i32 [[TMP8]], [[NEG10]]
; AVX512-NEXT:    br i1 [[CMP11]], label [[FOR_COND3]], label [[IF_THEN:%.*]]
; AVX512:       if.then:
; AVX512-NEXT:    tail call void @abort()
; AVX512-NEXT:    unreachable
; AVX512:       for.end14:
; AVX512-NEXT:    ret i32 0
;
entry:
  %0 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 0), align 16
  %neg = xor i32 %0, -1
  store i32 %neg, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 0), align 16
  %1 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 1), align 4
  %neg.1 = xor i32 %1, -1
  store i32 %neg.1, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 1), align 4
  %2 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 2), align 8
  %neg.2 = xor i32 %2, -1
  store i32 %neg.2, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 2), align 8
  %3 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 3), align 4
  %neg.3 = xor i32 %3, -1
  store i32 %neg.3, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 3), align 4
  %4 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 4), align 16
  %neg.4 = xor i32 %4, -1
  store i32 %neg.4, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 4), align 16
  %5 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 5), align 4
  %neg.5 = xor i32 %5, -1
  store i32 %neg.5, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 5), align 4
  %6 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 6), align 8
  %neg.6 = xor i32 %6, -1
  store i32 %neg.6, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 6), align 8
  %7 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 7), align 4
  %neg.7 = xor i32 %7, -1
  store i32 %neg.7, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 7), align 4
  %8 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 8), align 16
  %neg.8 = xor i32 %8, -1
  store i32 %neg.8, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 8), align 16
  %9 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 9), align 4
  %neg.9 = xor i32 %9, -1
  store i32 %neg.9, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 9), align 4
  %10 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 10), align 8
  %neg.10 = xor i32 %10, -1
  store i32 %neg.10, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 10), align 8
  %11 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 11), align 4
  %neg.11 = xor i32 %11, -1
  store i32 %neg.11, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 11), align 4
  %12 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 12), align 16
  %neg.12 = xor i32 %12, -1
  store i32 %neg.12, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 12), align 16
  %13 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 13), align 4
  %neg.13 = xor i32 %13, -1
  store i32 %neg.13, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 13), align 4
  %14 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 14), align 8
  %neg.14 = xor i32 %14, -1
  store i32 %neg.14, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 14), align 8
  %15 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 15), align 4
  %neg.15 = xor i32 %15, -1
  store i32 %neg.15, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 15), align 4
  %16 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 16), align 16
  %neg.16 = xor i32 %16, -1
  store i32 %neg.16, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 16), align 16
  %17 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 17), align 4
  %neg.17 = xor i32 %17, -1
  store i32 %neg.17, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 17), align 4
  %18 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 18), align 8
  %neg.18 = xor i32 %18, -1
  store i32 %neg.18, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 18), align 8
  %19 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 19), align 4
  %neg.19 = xor i32 %19, -1
  store i32 %neg.19, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 19), align 4
  %20 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 20), align 16
  %neg.20 = xor i32 %20, -1
  store i32 %neg.20, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 20), align 16
  %21 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 21), align 4
  %neg.21 = xor i32 %21, -1
  store i32 %neg.21, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 21), align 4
  %22 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 22), align 8
  %neg.22 = xor i32 %22, -1
  store i32 %neg.22, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 22), align 8
  %23 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 23), align 4
  %neg.23 = xor i32 %23, -1
  store i32 %neg.23, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 23), align 4
  %24 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 24), align 16
  %neg.24 = xor i32 %24, -1
  store i32 %neg.24, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 24), align 16
  %25 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 25), align 4
  %neg.25 = xor i32 %25, -1
  store i32 %neg.25, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 25), align 4
  %26 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 26), align 8
  %neg.26 = xor i32 %26, -1
  store i32 %neg.26, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 26), align 8
  %27 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 27), align 4
  %neg.27 = xor i32 %27, -1
  store i32 %neg.27, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 27), align 4
  %28 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 28), align 16
  %neg.28 = xor i32 %28, -1
  store i32 %neg.28, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 28), align 16
  %29 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 29), align 4
  %neg.29 = xor i32 %29, -1
  store i32 %neg.29, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 29), align 4
  %30 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 30), align 8
  %neg.30 = xor i32 %30, -1
  store i32 %neg.30, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 30), align 8
  %31 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 31), align 4
  %neg.31 = xor i32 %31, -1
  store i32 %neg.31, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 31), align 4
  %32 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 32), align 16
  %neg.32 = xor i32 %32, -1
  store i32 %neg.32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 32), align 16
  %33 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 33), align 4
  %neg.33 = xor i32 %33, -1
  store i32 %neg.33, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 33), align 4
  %34 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 34), align 8
  %neg.34 = xor i32 %34, -1
  store i32 %neg.34, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 34), align 8
  %35 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 35), align 4
  %neg.35 = xor i32 %35, -1
  store i32 %neg.35, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 35), align 4
  %36 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 36), align 16
  %neg.36 = xor i32 %36, -1
  store i32 %neg.36, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 36), align 16
  %37 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 37), align 4
  %neg.37 = xor i32 %37, -1
  store i32 %neg.37, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 37), align 4
  %38 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 38), align 8
  %neg.38 = xor i32 %38, -1
  store i32 %neg.38, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 38), align 8
  %39 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 39), align 4
  %neg.39 = xor i32 %39, -1
  store i32 %neg.39, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 39), align 4
  %40 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 40), align 16
  %neg.40 = xor i32 %40, -1
  store i32 %neg.40, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 40), align 16
  %41 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 41), align 4
  %neg.41 = xor i32 %41, -1
  store i32 %neg.41, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 41), align 4
  %42 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 42), align 8
  %neg.42 = xor i32 %42, -1
  store i32 %neg.42, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 42), align 8
  %43 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 43), align 4
  %neg.43 = xor i32 %43, -1
  store i32 %neg.43, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 43), align 4
  %44 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 44), align 16
  %neg.44 = xor i32 %44, -1
  store i32 %neg.44, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 44), align 16
  %45 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 45), align 4
  %neg.45 = xor i32 %45, -1
  store i32 %neg.45, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 45), align 4
  %46 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 46), align 8
  %neg.46 = xor i32 %46, -1
  store i32 %neg.46, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 46), align 8
  %47 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 47), align 4
  %neg.47 = xor i32 %47, -1
  store i32 %neg.47, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 47), align 4
  %48 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 48), align 16
  %neg.48 = xor i32 %48, -1
  store i32 %neg.48, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 48), align 16
  %49 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 49), align 4
  %neg.49 = xor i32 %49, -1
  store i32 %neg.49, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 49), align 4
  %50 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 50), align 8
  %neg.50 = xor i32 %50, -1
  store i32 %neg.50, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 50), align 8
  %51 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 51), align 4
  %neg.51 = xor i32 %51, -1
  store i32 %neg.51, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 51), align 4
  %52 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 52), align 16
  %neg.52 = xor i32 %52, -1
  store i32 %neg.52, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 52), align 16
  %53 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 53), align 4
  %neg.53 = xor i32 %53, -1
  store i32 %neg.53, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 53), align 4
  %54 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 54), align 8
  %neg.54 = xor i32 %54, -1
  store i32 %neg.54, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 54), align 8
  %55 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 55), align 4
  %neg.55 = xor i32 %55, -1
  store i32 %neg.55, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 55), align 4
  %56 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 56), align 16
  %neg.56 = xor i32 %56, -1
  store i32 %neg.56, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 56), align 16
  %57 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 57), align 4
  %neg.57 = xor i32 %57, -1
  store i32 %neg.57, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 57), align 4
  %58 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 58), align 8
  %neg.58 = xor i32 %58, -1
  store i32 %neg.58, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 58), align 8
  %59 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 59), align 4
  %neg.59 = xor i32 %59, -1
  store i32 %neg.59, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 59), align 4
  %60 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 60), align 16
  %neg.60 = xor i32 %60, -1
  store i32 %neg.60, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 60), align 16
  %61 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 61), align 4
  %neg.61 = xor i32 %61, -1
  store i32 %neg.61, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 61), align 4
  %62 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 62), align 8
  %neg.62 = xor i32 %62, -1
  store i32 %neg.62, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 62), align 8
  %63 = load i32, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ib, i64 0, i64 63), align 4
  %neg.63 = xor i32 %63, -1
  store i32 %neg.63, i32* getelementptr inbounds ([64 x i32], [64 x i32]* @ia, i64 0, i64 63), align 4
  br label %for.body5

for.cond3:                                        ; preds = %for.body5
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  %cmp4 = icmp ult i64 %indvars.iv, 63
  br i1 %cmp4, label %for.body5, label %for.end14

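; The verification loop below stays scalar in both run lines; each iteration
; reloads an element of @ia and @ib and branches to @abort on a mismatch.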
for.body5:                                        ; preds = %entry, %for.cond3
  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.cond3 ]
  %arrayidx7 = getelementptr inbounds [64 x i32], [64 x i32]* @ia, i64 0, i64 %indvars.iv
  %64 = load i32, i32* %arrayidx7, align 4
  %arrayidx9 = getelementptr inbounds [64 x i32], [64 x i32]* @ib, i64 0, i64 %indvars.iv
  %65 = load i32, i32* %arrayidx9, align 4
  %neg10 = xor i32 %65, -1
  %cmp11 = icmp eq i32 %64, %neg10
  br i1 %cmp11, label %for.cond3, label %if.then

if.then:                                          ; preds = %for.body5
  tail call void @abort() #2
  unreachable

for.end14:                                        ; preds = %for.cond3
  ret i32 0
}

declare void @abort() #2