//===- InstructionCombining.cpp - Combine multiple instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// InstructionCombining - Combine instructions to form fewer, simple
// instructions.  This pass does not modify the CFG.  This pass is where
// algebraic simplification happens.
//
// This pass combines things like:
//    %Y = add i32 %X, 1
//    %Z = add i32 %Y, 1
// into:
//    %Z = add i32 %X, 2
//
// This is a simple worklist driven algorithm.
//
// This pass guarantees that the following canonicalizations are performed on
// the program:
//    1. If a binary operator has a constant operand, it is moved to the RHS
//    2. Bitwise operators with constant operands are always grouped so that
//       shifts are performed first, then or's, then and's, then xor's.
//    3. Compare instructions are converted from <,>,<=,>= to ==,!= if possible
//    4. All cmp instructions on boolean values are replaced with logical ops
//    5. add X, X is represented as (X*2) => (X << 1)
//    6. Multiplies with a power-of-two constant argument are transformed into
//       shifts.
//   ... etc.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "instcombine"
#include "llvm/Transforms/Scalar.h"
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm-c/Initialization.h"
#include <algorithm>
#include <climits>
using namespace llvm;
using namespace llvm::PatternMatch;

STATISTIC(NumCombined , "Number of insts combined");
STATISTIC(NumConstProp, "Number of constant folds");
STATISTIC(NumDeadInst , "Number of dead inst eliminated");
STATISTIC(NumSunkInst , "Number of instructions sunk");
STATISTIC(NumExpand,    "Number of expansions");
STATISTIC(NumFactor   , "Number of factorizations");
STATISTIC(NumReassoc  , "Number of reassociations");

// Initialization Routines
void llvm::initializeInstCombine(PassRegistry &Registry) {
  initializeInstCombinerPass(Registry);
}

void LLVMInitializeInstCombine(LLVMPassRegistryRef R) {
  initializeInstCombine(*unwrap(R));
}

char InstCombiner::ID = 0;
INITIALIZE_PASS(InstCombiner, "instcombine",
                "Combine redundant instructions", false, false)

void InstCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<TargetLibraryInfo>();
}


/// ShouldChangeType - Return true if it is desirable to convert a computation
/// from 'From' to 'To'.  We don't want to convert from a legal to an illegal
/// type for example, or from a smaller to a larger illegal type.
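/// (Illustrative example, assuming TargetData declares only i32 and i64 as
/// legal integers: i64 -> i128 is rejected (legal to illegal), i128 -> i256 is
/// rejected (growing an already-illegal type), while i128 -> i64 is allowed.)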
bool InstCombiner::ShouldChangeType(Type *From, Type *To) const {
  assert(From->isIntegerTy() && To->isIntegerTy());

  // If we don't have TD, we don't know if the source/dest are legal.
  if (!TD) return false;

  unsigned FromWidth = From->getPrimitiveSizeInBits();
  unsigned ToWidth = To->getPrimitiveSizeInBits();
  bool FromLegal = TD->isLegalInteger(FromWidth);
  bool ToLegal = TD->isLegalInteger(ToWidth);

  // If this is a legal integer from type, and the result would be an illegal
  // type, don't do the transformation.
  if (FromLegal && !ToLegal)
    return false;

  // Otherwise, if both are illegal, do not increase the size of the result. We
  // do allow things like i160 -> i64, but not i64 -> i160.
  if (!FromLegal && !ToLegal && ToWidth > FromWidth)
    return false;

  return true;
}

// Return true if No Signed Wrap should be maintained for I.
// The No Signed Wrap flag can be kept if the operation "B (I.getOpcode) C",
// where both B and C should be ConstantInts, results in a constant that does
// not overflow. This function only handles the Add and Sub opcodes. For
// all other opcodes, the function conservatively returns false.
static bool MaintainNoSignedWrap(BinaryOperator &I, Value *B, Value *C) {
  OverflowingBinaryOperator *OBO = dyn_cast<OverflowingBinaryOperator>(&I);
  if (!OBO || !OBO->hasNoSignedWrap()) {
    return false;
  }

  // We reason about Add and Sub only.
  Instruction::BinaryOps Opcode = I.getOpcode();
  if (Opcode != Instruction::Add &&
      Opcode != Instruction::Sub) {
    return false;
  }

  ConstantInt *CB = dyn_cast<ConstantInt>(B);
  ConstantInt *CC = dyn_cast<ConstantInt>(C);

  if (!CB || !CC) {
    return false;
  }

  const APInt &BVal = CB->getValue();
  const APInt &CVal = CC->getValue();
  bool Overflow = false;

  if (Opcode == Instruction::Add) {
    BVal.sadd_ov(CVal, Overflow);
  } else {
    BVal.ssub_ov(CVal, Overflow);
  }

  return !Overflow;
}

/// SimplifyAssociativeOrCommutative - This performs a few simplifications for
/// operators which are associative or commutative:
//
//  Commutative operators:
//
//  1. Order operands such that they are listed from right (least complex) to
//     left (most complex).  This puts constants before unary operators before
//     binary operators.
//
//  Associative operators:
//
//  2. Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
//  3. Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
//
//  Associative and commutative operators:
//
//  4. Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
//  5. Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
//  6. Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
//     if C1 and C2 are constants.
//
bool InstCombiner::SimplifyAssociativeOrCommutative(BinaryOperator &I) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  bool Changed = false;

  do {
    // Order operands such that they are listed from right (least complex) to
    // left (most complex).  This puts constants before unary operators before
    // binary operators.
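    // (For example, "add i32 1, %x" is rewritten as "add i32 %x, 1", so later
    // combines only have to look for constants on the RHS.)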
    if (I.isCommutative() && getComplexity(I.getOperand(0)) <
        getComplexity(I.getOperand(1)))
      Changed = !I.swapOperands();

    BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0));
    BinaryOperator *Op1 = dyn_cast<BinaryOperator>(I.getOperand(1));

    if (I.isAssociative()) {
      // Transform: "(A op B) op C" ==> "A op (B op C)" if "B op C" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "B op C" simplify?
        if (Value *V = SimplifyBinOp(Opcode, B, C, TD)) {
          // It simplifies to V.  Form "A op V".
          I.setOperand(0, A);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          if (MaintainNoSignedWrap(I, B, C) &&
              (!Op0 || (isa<BinaryOperator>(Op0) && Op0->hasNoSignedWrap()))) {
            // Note: this is only valid because SimplifyBinOp doesn't look at
            // the operands to Op0.
            I.clearSubclassOptionalData();
            I.setHasNoSignedWrap(true);
          } else {
            I.clearSubclassOptionalData();
          }

          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "(A op B) op C" if "A op B" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "A op B" simplify?
        if (Value *V = SimplifyBinOp(Opcode, A, B, TD)) {
          // It simplifies to V.  Form "V op C".
          I.setOperand(0, V);
          I.setOperand(1, C);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }
    }

    if (I.isAssociative() && I.isCommutative()) {
      // Transform: "(A op B) op C" ==> "(C op A) op B" if "C op A" simplifies.
      if (Op0 && Op0->getOpcode() == Opcode) {
        Value *A = Op0->getOperand(0);
        Value *B = Op0->getOperand(1);
        Value *C = I.getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "V op B".
          I.setOperand(0, V);
          I.setOperand(1, B);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "A op (B op C)" ==> "B op (C op A)" if "C op A" simplifies.
      if (Op1 && Op1->getOpcode() == Opcode) {
        Value *A = I.getOperand(0);
        Value *B = Op1->getOperand(0);
        Value *C = Op1->getOperand(1);

        // Does "C op A" simplify?
        if (Value *V = SimplifyBinOp(Opcode, C, A, TD)) {
          // It simplifies to V.  Form "B op V".
          I.setOperand(0, B);
          I.setOperand(1, V);
          // Conservatively clear the optional flags, since they may not be
          // preserved by the reassociation.
          I.clearSubclassOptionalData();
          Changed = true;
          ++NumReassoc;
          continue;
        }
      }

      // Transform: "(A op C1) op (B op C2)" ==> "(A op B) op (C1 op C2)"
      // if C1 and C2 are constants.
      if (Op0 && Op1 &&
          Op0->getOpcode() == Opcode && Op1->getOpcode() == Opcode &&
          isa<Constant>(Op0->getOperand(1)) &&
          isa<Constant>(Op1->getOperand(1)) &&
          Op0->hasOneUse() && Op1->hasOneUse()) {
        Value *A = Op0->getOperand(0);
        Constant *C1 = cast<Constant>(Op0->getOperand(1));
        Value *B = Op1->getOperand(0);
        Constant *C2 = cast<Constant>(Op1->getOperand(1));

        Constant *Folded = ConstantExpr::get(Opcode, C1, C2);
        BinaryOperator *New = BinaryOperator::Create(Opcode, A, B);
        InsertNewInstWith(New, I);
        New->takeName(Op1);
        I.setOperand(0, New);
        I.setOperand(1, Folded);
        // Conservatively clear the optional flags, since they may not be
        // preserved by the reassociation.
        I.clearSubclassOptionalData();

        Changed = true;
        continue;
      }
    }

    // No further simplifications.
    return Changed;
  } while (1);
}

/// LeftDistributesOverRight - Whether "X LOp (Y ROp Z)" is always equal to
/// "(X LOp Y) ROp (X LOp Z)".
static bool LeftDistributesOverRight(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  switch (LOp) {
  default:
    return false;

  case Instruction::And:
    // And distributes over Or and Xor.
    switch (ROp) {
    default:
      return false;
    case Instruction::Or:
    case Instruction::Xor:
      return true;
    }

  case Instruction::Mul:
    // Multiplication distributes over addition and subtraction.
    switch (ROp) {
    default:
      return false;
    case Instruction::Add:
    case Instruction::Sub:
      return true;
    }

  case Instruction::Or:
    // Or distributes over And.
    switch (ROp) {
    default:
      return false;
    case Instruction::And:
      return true;
    }
  }
}

/// RightDistributesOverLeft - Whether "(X LOp Y) ROp Z" is always equal to
/// "(X ROp Z) LOp (Y ROp Z)".
static bool RightDistributesOverLeft(Instruction::BinaryOps LOp,
                                     Instruction::BinaryOps ROp) {
  if (Instruction::isCommutative(ROp))
    return LeftDistributesOverRight(ROp, LOp);
  // TODO: It would be nice to handle division, aka "(X + Y)/Z = X/Z + Y/Z",
  // but this requires knowing that the addition does not overflow and other
  // such subtleties.
  return false;
}

/// SimplifyUsingDistributiveLaws - This tries to simplify binary operations
/// which some other binary operation distributes over either by factorizing
/// out common terms (eg "(A*B)+(A*C)" -> "A*(B+C)") or expanding out if this
/// results in simplifications (eg: "A & (B | C) -> (A&B) | (A&C)" if this is
/// a win).  Returns the simplified value, or null if it didn't simplify.
Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
  Instruction::BinaryOps TopLevelOpcode = I.getOpcode(); // op

  // Factorization.
  if (Op0 && Op1 && Op0->getOpcode() == Op1->getOpcode()) {
    // The instruction has the form "(A op' B) op (C op' D)".  Try to factorize
    // a common term.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1);
    Value *C = Op1->getOperand(0), *D = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Does "X op' Y" always equal "Y op' X"?
    bool InnerCommutative = Instruction::isCommutative(InnerOpcode);

    // Does "X op' (Y op Z)" always equal "(X op' Y) op (X op' Z)"?
    if (LeftDistributesOverRight(InnerOpcode, TopLevelOpcode))
      // Does the instruction have the form "(A op' B) op (A op' D)" or, in the
      // commutative case, "(A op' B) op (C op' A)"?
      if (A == C || (InnerCommutative && A == D)) {
        if (A != C)
          std::swap(C, D);
        // Consider forming "A op' (B op D)".
        // If "B op D" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, B, D, TD);
        // If "B op D" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, B, D, Op1->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, A, V);
          V->takeName(&I);
          return V;
        }
      }

    // Does "(X op Y) op' Z" always equal "(X op' Z) op (Y op' Z)"?
    if (RightDistributesOverLeft(TopLevelOpcode, InnerOpcode))
      // Does the instruction have the form "(A op' B) op (C op' B)" or, in the
      // commutative case, "(A op' B) op (B op' D)"?
      if (B == D || (InnerCommutative && B == C)) {
        if (B != D)
          std::swap(C, D);
        // Consider forming "(A op C) op' B".
        // If "A op C" simplifies then it can be formed with no cost.
        Value *V = SimplifyBinOp(TopLevelOpcode, A, C, TD);
        // If "A op C" doesn't simplify then only go on if both of the existing
        // operations "A op' B" and "C op' D" will be zapped as no longer used.
        if (!V && Op0->hasOneUse() && Op1->hasOneUse())
          V = Builder->CreateBinOp(TopLevelOpcode, A, C, Op0->getName());
        if (V) {
          ++NumFactor;
          V = Builder->CreateBinOp(InnerOpcode, V, B);
          V->takeName(&I);
          return V;
        }
      }
  }

  // Expansion.
  if (Op0 && RightDistributesOverLeft(Op0->getOpcode(), TopLevelOpcode)) {
    // The instruction has the form "(A op' B) op C".  See if expanding it out
    // to "(A op C) op' (B op C)" results in simplifications.
    Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
    Instruction::BinaryOps InnerOpcode = Op0->getOpcode(); // op'

    // Do "A op C" and "B op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, C, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, B, C, TD)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
        if ((L == A && R == B) ||
            (Instruction::isCommutative(InnerOpcode) && L == B && R == A))
          return Op0;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        C = Builder->CreateBinOp(InnerOpcode, L, R);
        C->takeName(&I);
        return C;
      }
  }

  if (Op1 && LeftDistributesOverRight(TopLevelOpcode, Op1->getOpcode())) {
    // The instruction has the form "A op (B op' C)".  See if expanding it out
    // to "(A op B) op' (A op C)" results in simplifications.
    Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
    Instruction::BinaryOps InnerOpcode = Op1->getOpcode(); // op'

    // Do "A op B" and "A op C" both simplify?
    if (Value *L = SimplifyBinOp(TopLevelOpcode, A, B, TD))
      if (Value *R = SimplifyBinOp(TopLevelOpcode, A, C, TD)) {
        // They do! Return "L op' R".
        ++NumExpand;
        // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
        if ((L == B && R == C) ||
            (Instruction::isCommutative(InnerOpcode) && L == C && R == B))
          return Op1;
        // Otherwise return "L op' R" if it simplifies.
        if (Value *V = SimplifyBinOp(InnerOpcode, L, R, TD))
          return V;
        // Otherwise, create a new instruction.
        A = Builder->CreateBinOp(InnerOpcode, L, R);
        A->takeName(&I);
        return A;
      }
  }

  return 0;
}

// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
Value *InstCombiner::dyn_castNegVal(Value *V) const {
  if (BinaryOperator::isNeg(V))
    return BinaryOperator::getNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantInt *C = dyn_cast<ConstantInt>(V))
    return ConstantExpr::getNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isIntegerTy())
      return ConstantExpr::getNeg(C);

  return 0;
}

// dyn_castFNegVal - Given an 'fsub' instruction, return the RHS of the
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
Value *InstCombiner::dyn_castFNegVal(Value *V) const {
  if (BinaryOperator::isFNeg(V))
    return BinaryOperator::getFNegArgument(V);

  // Constants can be considered to be negated values if they can be folded.
  if (ConstantFP *C = dyn_cast<ConstantFP>(V))
    return ConstantExpr::getFNeg(C);

  if (ConstantVector *C = dyn_cast<ConstantVector>(V))
    if (C->getType()->getElementType()->isFloatingPointTy())
      return ConstantExpr::getFNeg(C);

  return 0;
}

static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
                                             InstCombiner *IC) {
  if (CastInst *CI = dyn_cast<CastInst>(&I)) {
    return IC->Builder->CreateCast(CI->getOpcode(), SO, I.getType());
  }

  // Figure out if the constant is the left or the right argument.
  bool ConstIsRHS = isa<Constant>(I.getOperand(1));
  Constant *ConstOperand = cast<Constant>(I.getOperand(ConstIsRHS));

  if (Constant *SOC = dyn_cast<Constant>(SO)) {
    if (ConstIsRHS)
      return ConstantExpr::get(I.getOpcode(), SOC, ConstOperand);
    return ConstantExpr::get(I.getOpcode(), ConstOperand, SOC);
  }

  Value *Op0 = SO, *Op1 = ConstOperand;
  if (!ConstIsRHS)
    std::swap(Op0, Op1);

  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(&I))
    return IC->Builder->CreateBinOp(BO->getOpcode(), Op0, Op1,
                                    SO->getName()+".op");
  if (ICmpInst *CI = dyn_cast<ICmpInst>(&I))
    return IC->Builder->CreateICmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  // Use CreateFCmp for fcmps; an ICmpInst cannot carry an FCmp predicate.
  if (FCmpInst *CI = dyn_cast<FCmpInst>(&I))
    return IC->Builder->CreateFCmp(CI->getPredicate(), Op0, Op1,
                                   SO->getName()+".cmp");
  llvm_unreachable("Unknown binary instruction type!");
}

// FoldOpIntoSelect - Given an instruction with a select as one operand and a
// constant as the other operand, try to fold the binary operator into the
// select arguments.  This also works for Cast instructions, which obviously do
// not have a second operand.
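// (For example, "add (select %c, i32 0, i32 -1), 4" can be folded into the
// select arms to give "select %c, i32 4, i32 3".)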
Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
  // Don't modify shared select instructions
  if (!SI->hasOneUse()) return 0;
  Value *TV = SI->getOperand(1);
  Value *FV = SI->getOperand(2);

  if (isa<Constant>(TV) || isa<Constant>(FV)) {
    // Bool selects with constant operands can be folded to logical ops.
    if (SI->getType()->isIntegerTy(1)) return 0;

    // If it's a bitcast involving vectors, make sure it has the same number of
    // elements on both sides.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(&Op)) {
      VectorType *DestTy = dyn_cast<VectorType>(BC->getDestTy());
      VectorType *SrcTy = dyn_cast<VectorType>(BC->getSrcTy());

      // Verify that either both or neither are vectors.
      if ((SrcTy == NULL) != (DestTy == NULL)) return 0;
      // If vectors, verify that they have the same number of elements.
      if (SrcTy && SrcTy->getNumElements() != DestTy->getNumElements())
        return 0;
    }

    Value *SelectTrueVal = FoldOperationIntoSelectOperand(Op, TV, this);
    Value *SelectFalseVal = FoldOperationIntoSelectOperand(Op, FV, this);

    return SelectInst::Create(SI->getCondition(),
                              SelectTrueVal, SelectFalseVal);
  }
  return 0;
}


/// FoldOpIntoPhi - Given a binary operator, cast instruction, or select which
/// has a PHI node as operand #0, see if we can fold the instruction into the
/// PHI (which is only possible if all operands to the PHI are constants).
///
Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
  PHINode *PN = cast<PHINode>(I.getOperand(0));
  unsigned NumPHIValues = PN->getNumIncomingValues();
  if (NumPHIValues == 0)
    return 0;

  // We normally only transform phis with a single use.  However, if a PHI has
  // multiple uses and they are all the same operation, we can fold *all* of
  // the uses into the PHI.
  if (!PN->hasOneUse()) {
    // Walk the use list for the instruction, comparing them to I.
    for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
         UI != E; ++UI) {
      Instruction *User = cast<Instruction>(*UI);
      if (User != &I && !I.isIdenticalTo(User))
        return 0;
    }
    // Otherwise, we can replace *all* users with the new PHI we form.
  }

  // Check to see if all of the operands of the PHI are simple constants
  // (constantint/constantfp/undef).  If there is one non-constant value,
  // remember the BB it is in.  If there is more than one or if *it* is a PHI,
  // bail out.  We don't do arbitrary constant expressions here because moving
  // their computation can be expensive without a cost model.
  BasicBlock *NonConstBB = 0;
  for (unsigned i = 0; i != NumPHIValues; ++i) {
    Value *InVal = PN->getIncomingValue(i);
    if (isa<Constant>(InVal) && !isa<ConstantExpr>(InVal))
      continue;

    if (isa<PHINode>(InVal)) return 0;  // Itself a phi.
    if (NonConstBB) return 0;  // More than one non-const value.

    NonConstBB = PN->getIncomingBlock(i);

    // If the InVal is an invoke at the end of the pred block, then we can't
    // insert a computation after it without breaking the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == NonConstBB)
        return 0;

    // If the incoming non-constant value is in I's block, we will remove one
    // instruction, but insert another equivalent one, leading to infinite
    // instcombine.
    if (NonConstBB == I.getParent())
      return 0;
  }

  // If there is exactly one non-constant value, we can insert a copy of the
  // operation in that block.  However, if this is a critical edge, we would be
  // inserting the computation on some other paths (e.g. inside a loop).  Only
  // do this if the pred block is unconditionally branching into the phi block.
  if (NonConstBB != 0) {
    BranchInst *BI = dyn_cast<BranchInst>(NonConstBB->getTerminator());
    if (!BI || !BI->isUnconditional()) return 0;
  }

  // Okay, we can do the transformation: create the new PHI node.
  PHINode *NewPN = PHINode::Create(I.getType(), PN->getNumIncomingValues());
  InsertNewInstBefore(NewPN, *PN);
  NewPN->takeName(PN);

  // If we are going to have to insert a new computation, do so right before
  // the predecessor's terminator.
  if (NonConstBB)
    Builder->SetInsertPoint(NonConstBB->getTerminator());

  // Next, add all of the operands to the PHI.
  if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
    // We only currently try to fold the condition of a select when it is a
    // phi, not the true/false values.
    Value *TrueV = SI->getTrueValue();
    Value *FalseV = SI->getFalseValue();
    BasicBlock *PhiTransBB = PN->getParent();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      BasicBlock *ThisBB = PN->getIncomingBlock(i);
      Value *TrueVInPred = TrueV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *FalseVInPred = FalseV->DoPHITranslation(PhiTransBB, ThisBB);
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = InC->isNullValue() ? FalseVInPred : TrueVInPred;
      else
        InV = Builder->CreateSelect(PN->getIncomingValue(i),
                                    TrueVInPred, FalseVInPred, "phitmp");
      NewPN->addIncoming(InV, ThisBB);
    }
  } else if (CmpInst *CI = dyn_cast<CmpInst>(&I)) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
      else if (isa<ICmpInst>(CI))
        InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      else
        InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
                                  C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else if (I.getNumOperands() == 2) {
    Constant *C = cast<Constant>(I.getOperand(1));
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV = 0;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::get(I.getOpcode(), InC, C);
      else
        InV = Builder->CreateBinOp(cast<BinaryOperator>(I).getOpcode(),
                                   PN->getIncomingValue(i), C, "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  } else {
    CastInst *CI = cast<CastInst>(&I);
    Type *RetTy = CI->getType();
    for (unsigned i = 0; i != NumPHIValues; ++i) {
      Value *InV;
      if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
        InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
      else
        InV = Builder->CreateCast(CI->getOpcode(),
                                  PN->getIncomingValue(i), I.getType(),
                                  "phitmp");
      NewPN->addIncoming(InV, PN->getIncomingBlock(i));
    }
  }

  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    if (User == &I) continue;
    ReplaceInstUsesWith(*User, NewPN);
    EraseInstFromFunction(*User);
  }
  return ReplaceInstUsesWith(I, NewPN);
}

/// FindElementAtOffset - Given a type and a constant offset, determine whether
/// or not there is a sequence of GEP indices into the type that will land us
/// at the specified offset.  If so, fill them into NewIndices and return the
/// resultant element type, otherwise return null.
Type *InstCombiner::FindElementAtOffset(Type *Ty, int64_t Offset,
                                        SmallVectorImpl<Value*> &NewIndices) {
  if (!TD) return 0;
  if (!Ty->isSized()) return 0;

  // Start with the index over the outer type.  Note that the type size
  // might be zero (even if the offset isn't zero) if the indexed type
  // is something like [0 x {int, int}]
  Type *IntPtrTy = TD->getIntPtrType(Ty->getContext());
  int64_t FirstIdx = 0;
  if (int64_t TySize = TD->getTypeAllocSize(Ty)) {
    FirstIdx = Offset/TySize;
    Offset -= FirstIdx*TySize;

    // Handle hosts where % returns negative instead of values [0..TySize).
    if (Offset < 0) {
      --FirstIdx;
      Offset += TySize;
      assert(Offset >= 0);
    }
    assert((uint64_t)Offset < (uint64_t)TySize && "Out of range offset");
  }

  NewIndices.push_back(ConstantInt::get(IntPtrTy, FirstIdx));

  // Index into the types.  If we fail, return null.
  while (Offset) {
    // Indexing into tail padding between struct/array elements.
    if (uint64_t(Offset*8) >= TD->getTypeSizeInBits(Ty))
      return 0;

    if (StructType *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      assert(Offset < (int64_t)SL->getSizeInBytes() &&
             "Offset must stay within the indexed type");

      unsigned Elt = SL->getElementContainingOffset(Offset);
      NewIndices.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                            Elt));

      Offset -= SL->getElementOffset(Elt);
      Ty = STy->getElementType(Elt);
    } else if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
      uint64_t EltSize = TD->getTypeAllocSize(AT->getElementType());
      assert(EltSize && "Cannot index into a zero-sized array");
      NewIndices.push_back(ConstantInt::get(IntPtrTy, Offset/EltSize));
      Offset %= EltSize;
      Ty = AT->getElementType();
    } else {
      // Otherwise, we can't index into the middle of this atomic type, bail.
      return 0;
    }
  }

  return Ty;
}

static bool shouldMergeGEPs(GEPOperator &GEP, GEPOperator &Src) {
  // If this GEP has only 0 indices, it is the same pointer as
  // Src. If Src is not a trivial GEP too, don't combine
  // the indices.
  if (GEP.hasAllZeroIndices() && !Src.hasAllZeroIndices() &&
      !Src.hasOneUse())
    return false;
  return true;
}

Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
  SmallVector<Value*, 8> Ops(GEP.op_begin(), GEP.op_end());

  if (Value *V = SimplifyGEPInst(Ops, TD))
    return ReplaceInstUsesWith(GEP, V);

  Value *PtrOp = GEP.getOperand(0);

  // Eliminate unneeded casts for indices, and replace indices which displace
  // by multiples of a zero size type with zero.
  if (TD) {
    bool MadeChange = false;
    Type *IntPtrTy = TD->getIntPtrType(GEP.getContext());

    gep_type_iterator GTI = gep_type_begin(GEP);
    for (User::op_iterator I = GEP.op_begin() + 1, E = GEP.op_end();
         I != E; ++I, ++GTI) {
      // Skip indices into struct types.
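      // (Struct field indices are required by the IR to be i32 constants, so
      // there is nothing to canonicalize for them.)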
      SequentialType *SeqTy = dyn_cast<SequentialType>(*GTI);
      if (!SeqTy) continue;

      // If the element type has zero size then any index over it is equivalent
      // to an index of zero, so replace it with zero if it is not zero already.
      if (SeqTy->getElementType()->isSized() &&
          TD->getTypeAllocSize(SeqTy->getElementType()) == 0)
        if (!isa<Constant>(*I) || !cast<Constant>(*I)->isNullValue()) {
          *I = Constant::getNullValue(IntPtrTy);
          MadeChange = true;
        }

      if ((*I)->getType() != IntPtrTy) {
        // If we are using a wider index than needed for this platform, shrink
        // it to what we need.  If narrower, sign-extend it to what we need.
        // This explicit cast can make subsequent optimizations more obvious.
        *I = Builder->CreateIntCast(*I, IntPtrTy, true);
        MadeChange = true;
      }
    }
    if (MadeChange) return &GEP;
  }

  // Combine Indices - If the source pointer to this getelementptr instruction
  // is a getelementptr instruction, combine the indices of the two
  // getelementptr instructions into a single instruction.
  //
  if (GEPOperator *Src = dyn_cast<GEPOperator>(PtrOp)) {
    if (!shouldMergeGEPs(*cast<GEPOperator>(&GEP), *Src))
      return 0;

    // Note that if our source is a gep chain itself, we wait for that
    // chain to be resolved before we perform this transformation.  This
    // avoids us creating a TON of code in some cases.
    if (GEPOperator *SrcGEP =
          dyn_cast<GEPOperator>(Src->getOperand(0)))
      if (SrcGEP->getNumOperands() == 2 && shouldMergeGEPs(*Src, *SrcGEP))
        return 0;   // Wait until our source is folded to completion.

    SmallVector<Value*, 8> Indices;

    // Find out whether the last index in the source GEP is a sequential idx.
    bool EndsWithSequential = false;
    for (gep_type_iterator I = gep_type_begin(*Src), E = gep_type_end(*Src);
         I != E; ++I)
      EndsWithSequential = !(*I)->isStructTy();

    // Can we combine the two pointer arithmetics offsets?
    if (EndsWithSequential) {
      // Replace: gep (gep %P, long B), long A, ...
      // With:    T = long A+B; gep %P, T, ...
      //
      Value *Sum;
      Value *SO1 = Src->getOperand(Src->getNumOperands()-1);
      Value *GO1 = GEP.getOperand(1);
      if (SO1 == Constant::getNullValue(SO1->getType())) {
        Sum = GO1;
      } else if (GO1 == Constant::getNullValue(GO1->getType())) {
        Sum = SO1;
      } else {
        // If they aren't the same type, then the input hasn't been processed
        // by the loop above yet (which canonicalizes sequential index types to
        // intptr_t).  Just avoid transforming this until the input has been
        // normalized.
        if (SO1->getType() != GO1->getType())
          return 0;
        Sum = Builder->CreateAdd(SO1, GO1, PtrOp->getName()+".sum");
      }

      // Update the GEP in place if possible.
      if (Src->getNumOperands() == 2) {
        GEP.setOperand(0, Src->getOperand(0));
        GEP.setOperand(1, Sum);
        return &GEP;
      }
      Indices.append(Src->op_begin()+1, Src->op_end()-1);
      Indices.push_back(Sum);
      Indices.append(GEP.op_begin()+2, GEP.op_end());
    } else if (isa<Constant>(*GEP.idx_begin()) &&
               cast<Constant>(*GEP.idx_begin())->isNullValue() &&
               Src->getNumOperands() != 1) {
      // Otherwise we can do the fold if the first index of the GEP is a zero
      Indices.append(Src->op_begin()+1, Src->op_end());
      Indices.append(GEP.idx_begin()+1, GEP.idx_end());
    }

    if (!Indices.empty())
      return (GEP.isInBounds() && Src->isInBounds()) ?
        GetElementPtrInst::CreateInBounds(Src->getOperand(0), Indices,
                                          GEP.getName()) :
        GetElementPtrInst::Create(Src->getOperand(0), Indices, GEP.getName());
  }

  // Handle gep(bitcast x) and gep(gep x, 0, 0, 0).
  Value *StrippedPtr = PtrOp->stripPointerCasts();
  PointerType *StrippedPtrTy = cast<PointerType>(StrippedPtr->getType());
  if (StrippedPtr != PtrOp &&
      StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

    bool HasZeroPointerIndex = false;
    if (ConstantInt *C = dyn_cast<ConstantInt>(GEP.getOperand(1)))
      HasZeroPointerIndex = C->isZero();

    // Transform: GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ...
    // into     : GEP [10 x i8]* X, i32 0, ...
    //
    // Likewise, transform: GEP (bitcast i8* X to [0 x i8]*), i32 0, ...
    //           into     : GEP i8* X, ...
    //
    // This occurs when the program declares an array extern like "int X[];"
    if (HasZeroPointerIndex) {
      PointerType *CPTy = cast<PointerType>(PtrOp->getType());
      if (ArrayType *CATy =
            dyn_cast<ArrayType>(CPTy->getElementType())) {
        // GEP (bitcast i8* X to [0 x i8]*), i32 0, ... ?
        if (CATy->getElementType() == StrippedPtrTy->getElementType()) {
          // -> GEP i8* X, ...
          SmallVector<Value*, 8> Idx(GEP.idx_begin()+1, GEP.idx_end());
          GetElementPtrInst *Res =
            GetElementPtrInst::Create(StrippedPtr, Idx, GEP.getName());
          Res->setIsInBounds(GEP.isInBounds());
          return Res;
        }

        if (ArrayType *XATy =
              dyn_cast<ArrayType>(StrippedPtrTy->getElementType())) {
          // GEP (bitcast [10 x i8]* X to [0 x i8]*), i32 0, ... ?
          if (CATy->getElementType() == XATy->getElementType()) {
            // -> GEP [10 x i8]* X, i32 0, ...
            // At this point, we know that the cast source type is a pointer
            // to an array of the same type as the destination pointer
            // array.  Because the array type is never stepped over (there
            // is a leading zero) we can fold the cast into this GEP.
            GEP.setOperand(0, StrippedPtr);
            return &GEP;
          }
        }
      }
    } else if (GEP.getNumOperands() == 2) {
      // Transform things like:
      // %t = getelementptr i32* bitcast ([2 x i32]* %str to i32*), i32 %V
      // into:  %t1 = getelementptr [2 x i32]* %str, i32 0, i32 %V; bitcast
      Type *SrcElTy = StrippedPtrTy->getElementType();
      Type *ResElTy = cast<PointerType>(PtrOp->getType())->getElementType();
      if (TD && SrcElTy->isArrayTy() &&
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType()) ==
          TD->getTypeAllocSize(ResElTy)) {
        Value *Idx[2];
        Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
        Idx[1] = GEP.getOperand(1);
        Value *NewGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
          Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
        // V and GEP are both pointer types --> BitCast
        return new BitCastInst(NewGEP, GEP.getType());
      }

      // Transform things like:
      // getelementptr i8* bitcast ([100 x double]* X to i8*), i32 %tmp
      //   (where tmp = 8*tmp2) into:
      // getelementptr [100 x double]* %arr, i32 0, i32 %tmp2; bitcast

      if (TD && SrcElTy->isArrayTy() && ResElTy->isIntegerTy(8)) {
        uint64_t ArrayEltSize =
          TD->getTypeAllocSize(cast<ArrayType>(SrcElTy)->getElementType());

        // Check to see if "tmp" is a scale by a multiple of ArrayEltSize.  We
        // allow either a mul, shift, or constant here.
        Value *NewIdx = 0;
        ConstantInt *Scale = 0;
        if (ArrayEltSize == 1) {
          NewIdx = GEP.getOperand(1);
          Scale = ConstantInt::get(cast<IntegerType>(NewIdx->getType()), 1);
        } else if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP.getOperand(1))) {
          NewIdx = ConstantInt::get(CI->getType(), 1);
          Scale = CI;
        } else if (Instruction *Inst = dyn_cast<Instruction>(GEP.getOperand(1))) {
          if (Inst->getOpcode() == Instruction::Shl &&
              isa<ConstantInt>(Inst->getOperand(1))) {
            ConstantInt *ShAmt = cast<ConstantInt>(Inst->getOperand(1));
            uint32_t ShAmtVal = ShAmt->getLimitedValue(64);
            Scale = ConstantInt::get(cast<IntegerType>(Inst->getType()),
                                     1ULL << ShAmtVal);
            NewIdx = Inst->getOperand(0);
          } else if (Inst->getOpcode() == Instruction::Mul &&
                     isa<ConstantInt>(Inst->getOperand(1))) {
            Scale = cast<ConstantInt>(Inst->getOperand(1));
            NewIdx = Inst->getOperand(0);
          }
        }

        // If the index will be to exactly the right offset with the scale taken
        // out, perform the transformation.  Note, we don't know whether Scale
        // is signed or not.  We'll use the unsigned versions of the
        // division/modulo operations after making sure Scale doesn't have the
        // sign bit set.
        if (ArrayEltSize && Scale && Scale->getSExtValue() >= 0LL &&
            Scale->getZExtValue() % ArrayEltSize == 0) {
          Scale = ConstantInt::get(Scale->getType(),
                                   Scale->getZExtValue() / ArrayEltSize);
          if (Scale->getZExtValue() != 1) {
            Constant *C = ConstantExpr::getIntegerCast(Scale, NewIdx->getType(),
                                                       false /*ZExt*/);
            NewIdx = Builder->CreateMul(NewIdx, C, "idxscale");
          }

          // Insert the new GEP instruction.
          Value *Idx[2];
          Idx[0] = Constant::getNullValue(Type::getInt32Ty(GEP.getContext()));
          Idx[1] = NewIdx;
          Value *NewGEP = GEP.isInBounds() ?
            Builder->CreateInBoundsGEP(StrippedPtr, Idx, GEP.getName()) :
            Builder->CreateGEP(StrippedPtr, Idx, GEP.getName());
          // The NewGEP must be pointer typed, so must the old one -> BitCast
          return new BitCastInst(NewGEP, GEP.getType());
        }
      }
    }
  }

  /// See if we can simplify:
  ///   X = bitcast A* to B*
  ///   Y = gep X, <...constant indices...>
  /// into a gep of the original struct.  This is important for SROA and alias
  /// analysis of unions.  If "A" is also a bitcast, wait for A/X to be merged.
  if (BitCastInst *BCI = dyn_cast<BitCastInst>(PtrOp)) {
    if (TD &&
        !isa<BitCastInst>(BCI->getOperand(0)) && GEP.hasAllConstantIndices() &&
        StrippedPtrTy->getAddressSpace() == GEP.getPointerAddressSpace()) {

      // Determine how much the GEP moves the pointer.  We are guaranteed to get
      // a constant back from EmitGEPOffset.
      ConstantInt *OffsetV = cast<ConstantInt>(EmitGEPOffset(&GEP));
      int64_t Offset = OffsetV->getSExtValue();

      // If this GEP instruction doesn't move the pointer, just replace the GEP
      // with a bitcast of the real input to the dest type.
      if (Offset == 0) {
        // If the bitcast is of an allocation, and the allocation will be
        // converted to match the type of the cast, don't touch this.
        if (isa<AllocaInst>(BCI->getOperand(0)) ||
            isMalloc(BCI->getOperand(0))) {
          // See if the bitcast simplifies, if so, don't nuke this GEP yet.
          if (Instruction *I = visitBitCast(*BCI)) {
            if (I != BCI) {
              I->takeName(BCI);
              BCI->getParent()->getInstList().insert(BCI, I);
              ReplaceInstUsesWith(*BCI, I);
            }
            return &GEP;
          }
        }
        return new BitCastInst(BCI->getOperand(0), GEP.getType());
      }

      // Otherwise, if the offset is non-zero, we need to find out if there is
      // a field at Offset in 'A's type.  If so, we can pull the cast through
      // the GEP.
      SmallVector<Value*, 8> NewIndices;
      Type *InTy =
        cast<PointerType>(BCI->getOperand(0)->getType())->getElementType();
      if (FindElementAtOffset(InTy, Offset, NewIndices)) {
        Value *NGEP = GEP.isInBounds() ?
          Builder->CreateInBoundsGEP(BCI->getOperand(0), NewIndices) :
          Builder->CreateGEP(BCI->getOperand(0), NewIndices);

        if (NGEP->getType() == GEP.getType())
          return ReplaceInstUsesWith(GEP, NGEP);
        NGEP->takeName(&GEP);
        return new BitCastInst(NGEP, GEP.getType());
      }
    }
  }

  return 0;
}



static bool IsOnlyNullComparedAndFreed(Value *V, SmallVectorImpl<WeakVH> &Users,
                                       int Depth = 0) {
  if (Depth == 8)
    return false;

  for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
       UI != UE; ++UI) {
    User *U = *UI;
    if (isFreeCall(U)) {
      Users.push_back(U);
      continue;
    }
    if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
      if (ICI->isEquality() && isa<ConstantPointerNull>(ICI->getOperand(1))) {
        Users.push_back(ICI);
        continue;
      }
    }
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (IsOnlyNullComparedAndFreed(BCI, Users, Depth+1)) {
        Users.push_back(BCI);
        continue;
      }
    }
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (IsOnlyNullComparedAndFreed(GEPI, Users, Depth+1)) {
        Users.push_back(GEPI);
        continue;
      }
    }
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
          II->getIntrinsicID() == Intrinsic::lifetime_end) {
        Users.push_back(II);
        continue;
      }
    }
    return false;
  }
  return true;
}

Instruction *InstCombiner::visitMalloc(Instruction &MI) {
  // If we have a malloc call which is only used in any amount of comparisons
  // to null and free calls, delete the calls and replace the comparisons with
  // true or false as appropriate.
  SmallVector<WeakVH, 64> Users;
  if (IsOnlyNullComparedAndFreed(&MI, Users)) {
    for (unsigned i = 0, e = Users.size(); i != e; ++i) {
      Instruction *I = cast_or_null<Instruction>(&*Users[i]);
      if (!I) continue;

      if (ICmpInst *C = dyn_cast<ICmpInst>(I)) {
        ReplaceInstUsesWith(*C,
                            ConstantInt::get(Type::getInt1Ty(C->getContext()),
                                             C->isFalseWhenEqual()));
      } else if (isa<BitCastInst>(I) || isa<GetElementPtrInst>(I)) {
        ReplaceInstUsesWith(*I, UndefValue::get(I->getType()));
      }
      EraseInstFromFunction(*I);
    }
    return EraseInstFromFunction(MI);
  }
  return 0;
}



Instruction *InstCombiner::visitFree(CallInst &FI) {
  Value *Op = FI.getArgOperand(0);

  // free undef -> unreachable.
  if (isa<UndefValue>(Op)) {
    // Insert a new store to null because we cannot modify the CFG here.
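    // (The store through an undef pointer is undefined behavior; a later pass
    // such as SimplifyCFG can then turn the tail of this block into
    // 'unreachable'.)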
    Builder->CreateStore(ConstantInt::getTrue(FI.getContext()),
                         UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
    return EraseInstFromFunction(FI);
  }

  // If we have 'free null' delete the instruction.  This can happen in stl
  // code when lots of inlining happens.
  if (isa<ConstantPointerNull>(Op))
    return EraseInstFromFunction(FI);

  return 0;
}



Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
  // Change br (not X), label True, label False to: br X, label False, True
  Value *X = 0;
  BasicBlock *TrueDest;
  BasicBlock *FalseDest;
  if (match(&BI, m_Br(m_Not(m_Value(X)), TrueDest, FalseDest)) &&
      !isa<Constant>(X)) {
    // Swap Destinations and condition...
    BI.setCondition(X);
    BI.swapSuccessors();
    return &BI;
  }

  // Canonicalize fcmp_one -> fcmp_oeq
  FCmpInst::Predicate FPred; Value *Y;
  if (match(&BI, m_Br(m_FCmp(FPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
        FPred == FCmpInst::FCMP_OGE) {
      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));

      // Swap Destinations and condition.
      BI.swapSuccessors();
      Worklist.Add(Cond);
      return &BI;
    }

  // Canonicalize icmp_ne -> icmp_eq
  ICmpInst::Predicate IPred;
  if (match(&BI, m_Br(m_ICmp(IPred, m_Value(X), m_Value(Y)),
                      TrueDest, FalseDest)) &&
      BI.getCondition()->hasOneUse())
    if (IPred == ICmpInst::ICMP_NE  || IPred == ICmpInst::ICMP_ULE ||
        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
        IPred == ICmpInst::ICMP_SGE) {
      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
      // Swap Destinations and condition.
      BI.swapSuccessors();
      Worklist.Add(Cond);
      return &BI;
    }

  return 0;
}

Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
  Value *Cond = SI.getCondition();
  if (Instruction *I = dyn_cast<Instruction>(Cond)) {
    if (I->getOpcode() == Instruction::Add)
      if (ConstantInt *AddRHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
        // change 'switch (X+4) case 1:' into 'switch (X) case -3'
        unsigned NumCases = SI.getNumCases();
        // Skip the first item since that's the default case.
1242 for (unsigned i = 1; i < NumCases; ++i) { 1243 ConstantInt* CaseVal = SI.getCaseValue(i); 1244 Constant* NewCaseVal = ConstantExpr::getSub(cast<Constant>(CaseVal), 1245 AddRHS); 1246 assert(isa<ConstantInt>(NewCaseVal) && 1247 "Result of expression should be constant"); 1248 SI.setSuccessorValue(i, cast<ConstantInt>(NewCaseVal)); 1249 } 1250 SI.setCondition(I->getOperand(0)); 1251 Worklist.Add(I); 1252 return &SI; 1253 } 1254 } 1255 return 0; 1256 } 1257 1258 Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) { 1259 Value *Agg = EV.getAggregateOperand(); 1260 1261 if (!EV.hasIndices()) 1262 return ReplaceInstUsesWith(EV, Agg); 1263 1264 if (Constant *C = dyn_cast<Constant>(Agg)) { 1265 if (isa<UndefValue>(C)) 1266 return ReplaceInstUsesWith(EV, UndefValue::get(EV.getType())); 1267 1268 if (isa<ConstantAggregateZero>(C)) 1269 return ReplaceInstUsesWith(EV, Constant::getNullValue(EV.getType())); 1270 1271 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C)) { 1272 // Extract the element indexed by the first index out of the constant 1273 Value *V = C->getOperand(*EV.idx_begin()); 1274 if (EV.getNumIndices() > 1) 1275 // Extract the remaining indices out of the constant indexed by the 1276 // first index 1277 return ExtractValueInst::Create(V, EV.getIndices().slice(1)); 1278 else 1279 return ReplaceInstUsesWith(EV, V); 1280 } 1281 return 0; // Can't handle other constants 1282 } 1283 if (InsertValueInst *IV = dyn_cast<InsertValueInst>(Agg)) { 1284 // We're extracting from an insertvalue instruction, compare the indices 1285 const unsigned *exti, *exte, *insi, *inse; 1286 for (exti = EV.idx_begin(), insi = IV->idx_begin(), 1287 exte = EV.idx_end(), inse = IV->idx_end(); 1288 exti != exte && insi != inse; 1289 ++exti, ++insi) { 1290 if (*insi != *exti) 1291 // The insert and extract both reference distinctly different elements. 1292 // This means the extract is not influenced by the insert, and we can 1293 // replace the aggregate operand of the extract with the aggregate 1294 // operand of the insert. i.e., replace 1295 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 1296 // %E = extractvalue { i32, { i32 } } %I, 0 1297 // with 1298 // %E = extractvalue { i32, { i32 } } %A, 0 1299 return ExtractValueInst::Create(IV->getAggregateOperand(), 1300 EV.getIndices()); 1301 } 1302 if (exti == exte && insi == inse) 1303 // Both iterators are at the end: Index lists are identical. Replace 1304 // %B = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 1305 // %C = extractvalue { i32, { i32 } } %B, 1, 0 1306 // with "i32 42" 1307 return ReplaceInstUsesWith(EV, IV->getInsertedValueOperand()); 1308 if (exti == exte) { 1309 // The extract list is a prefix of the insert list. i.e. replace 1310 // %I = insertvalue { i32, { i32 } } %A, i32 42, 1, 0 1311 // %E = extractvalue { i32, { i32 } } %I, 1 1312 // with 1313 // %X = extractvalue { i32, { i32 } } %A, 1 1314 // %E = insertvalue { i32 } %X, i32 42, 0 1315 // by switching the order of the insert and extract (though the 1316 // insertvalue should be left in, since it may have other uses). 1317 Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(), 1318 EV.getIndices()); 1319 return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(), 1320 makeArrayRef(insi, inse)); 1321 } 1322 if (insi == inse) 1323 // The insert list is a prefix of the extract list 1324 // We can simply remove the common indices from the extract and make it 1325 // operate on the inserted value instead of the insertvalue result. 
1326 // i.e., replace 1327 // %I = insertvalue { i32, { i32 } } %A, { i32 } { i32 42 }, 1 1328 // %E = extractvalue { i32, { i32 } } %I, 1, 0 1329 // with 1330 // %E extractvalue { i32 } { i32 42 }, 0 1331 return ExtractValueInst::Create(IV->getInsertedValueOperand(), 1332 makeArrayRef(exti, exte)); 1333 } 1334 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Agg)) { 1335 // We're extracting from an intrinsic, see if we're the only user, which 1336 // allows us to simplify multiple result intrinsics to simpler things that 1337 // just get one value. 1338 if (II->hasOneUse()) { 1339 // Check if we're grabbing the overflow bit or the result of a 'with 1340 // overflow' intrinsic. If it's the latter we can remove the intrinsic 1341 // and replace it with a traditional binary instruction. 1342 switch (II->getIntrinsicID()) { 1343 case Intrinsic::uadd_with_overflow: 1344 case Intrinsic::sadd_with_overflow: 1345 if (*EV.idx_begin() == 0) { // Normal result. 1346 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1347 ReplaceInstUsesWith(*II, UndefValue::get(II->getType())); 1348 EraseInstFromFunction(*II); 1349 return BinaryOperator::CreateAdd(LHS, RHS); 1350 } 1351 1352 // If the normal result of the add is dead, and the RHS is a constant, 1353 // we can transform this into a range comparison. 1354 // overflow = uadd a, -4 --> overflow = icmp ugt a, 3 1355 if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow) 1356 if (ConstantInt *CI = dyn_cast<ConstantInt>(II->getArgOperand(1))) 1357 return new ICmpInst(ICmpInst::ICMP_UGT, II->getArgOperand(0), 1358 ConstantExpr::getNot(CI)); 1359 break; 1360 case Intrinsic::usub_with_overflow: 1361 case Intrinsic::ssub_with_overflow: 1362 if (*EV.idx_begin() == 0) { // Normal result. 1363 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1364 ReplaceInstUsesWith(*II, UndefValue::get(II->getType())); 1365 EraseInstFromFunction(*II); 1366 return BinaryOperator::CreateSub(LHS, RHS); 1367 } 1368 break; 1369 case Intrinsic::umul_with_overflow: 1370 case Intrinsic::smul_with_overflow: 1371 if (*EV.idx_begin() == 0) { // Normal result. 1372 Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1); 1373 ReplaceInstUsesWith(*II, UndefValue::get(II->getType())); 1374 EraseInstFromFunction(*II); 1375 return BinaryOperator::CreateMul(LHS, RHS); 1376 } 1377 break; 1378 default: 1379 break; 1380 } 1381 } 1382 } 1383 if (LoadInst *L = dyn_cast<LoadInst>(Agg)) 1384 // If the (non-volatile) load only has one use, we can rewrite this to a 1385 // load from a GEP. This reduces the size of the load. 1386 // FIXME: If a load is used only by extractvalue instructions then this 1387 // could be done regardless of having multiple uses. 1388 if (L->isSimple() && L->hasOneUse()) { 1389 // extractvalue has integer indices, getelementptr has Value*s. Convert. 1390 SmallVector<Value*, 4> Indices; 1391 // Prefix an i32 0 since we need the first element. 1392 Indices.push_back(Builder->getInt32(0)); 1393 for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end(); 1394 I != E; ++I) 1395 Indices.push_back(Builder->getInt32(*I)); 1396 1397 // We need to insert these at the location of the old load, not at that of 1398 // the extractvalue. 1399 Builder->SetInsertPoint(L->getParent(), L); 1400 Value *GEP = Builder->CreateInBoundsGEP(L->getPointerOperand(), Indices); 1401 // Returning the load directly will cause the main loop to insert it in 1402 // the wrong spot, so use ReplaceInstUsesWith(). 
1403 return ReplaceInstUsesWith(EV, Builder->CreateLoad(GEP)); 1404 } 1405 // We could simplify extracts from other values. Note that nested extracts may 1406 // already be simplified implicitly by the above: extract (extract (insert) ) 1407 // will be translated into extract ( insert ( extract ) ) first and then just 1408 // the value inserted, if appropriate. Similarly for extracts from single-use 1409 // loads: extract (extract (load)) will be translated to extract (load (gep)) 1410 // and if again single-use then via load (gep (gep)) to load (gep). 1411 // However, double extracts from e.g. function arguments or return values 1412 // aren't handled yet. 1413 return 0; 1414 } 1415 1416 enum Personality_Type { 1417 Unknown_Personality, 1418 GNU_Ada_Personality, 1419 GNU_CXX_Personality, 1420 GNU_ObjC_Personality 1421 }; 1422 1423 /// RecognizePersonality - See if the given exception handling personality 1424 /// function is one that we understand. If so, return a description of it; 1425 /// otherwise return Unknown_Personality. 1426 static Personality_Type RecognizePersonality(Value *Pers) { 1427 Function *F = dyn_cast<Function>(Pers->stripPointerCasts()); 1428 if (!F) 1429 return Unknown_Personality; 1430 return StringSwitch<Personality_Type>(F->getName()) 1431 .Case("__gnat_eh_personality", GNU_Ada_Personality) 1432 .Case("__gxx_personality_v0", GNU_CXX_Personality) 1433 .Case("__objc_personality_v0", GNU_ObjC_Personality) 1434 .Default(Unknown_Personality); 1435 } 1436 1437 /// isCatchAll - Return 'true' if the given typeinfo will match anything. 1438 static bool isCatchAll(Personality_Type Personality, Constant *TypeInfo) { 1439 switch (Personality) { 1440 case Unknown_Personality: 1441 return false; 1442 case GNU_Ada_Personality: 1443 // While __gnat_all_others_value will match any Ada exception, it doesn't 1444 // match foreign exceptions (or didn't, before gcc-4.7). 1445 return false; 1446 case GNU_CXX_Personality: 1447 case GNU_ObjC_Personality: 1448 return TypeInfo->isNullValue(); 1449 } 1450 llvm_unreachable("Unknown personality!"); 1451 } 1452 1453 static bool shorter_filter(const Value *LHS, const Value *RHS) { 1454 return 1455 cast<ArrayType>(LHS->getType())->getNumElements() 1456 < 1457 cast<ArrayType>(RHS->getType())->getNumElements(); 1458 } 1459 1460 Instruction *InstCombiner::visitLandingPadInst(LandingPadInst &LI) { 1461 // The logic here should be correct for any real-world personality function. 1462 // However if that turns out not to be true, the offending logic can always 1463 // be conditioned on the personality function, like the catch-all logic is. 1464 Personality_Type Personality = RecognizePersonality(LI.getPersonalityFn()); 1465 1466 // Simplify the list of clauses, eg by removing repeated catch clauses 1467 // (these are often created by inlining). 1468 bool MakeNewInstruction = false; // If true, recreate using the following: 1469 SmallVector<Value *, 16> NewClauses; // - Clauses for the new instruction; 1470 bool CleanupFlag = LI.isCleanup(); // - The new instruction is a cleanup. 1471 1472 SmallPtrSet<Value *, 16> AlreadyCaught; // Typeinfos known caught already. 1473 for (unsigned i = 0, e = LI.getNumClauses(); i != e; ++i) { 1474 bool isLastClause = i + 1 == e; 1475 if (LI.isCatch(i)) { 1476 // A catch clause. 1477 Value *CatchClause = LI.getClause(i); 1478 Constant *TypeInfo = cast<Constant>(CatchClause->stripPointerCasts()); 1479 1480 // If we already saw this clause, there is no point in having a second 1481 // copy of it. 
1482 if (AlreadyCaught.insert(TypeInfo)) { 1483 // This catch clause was not already seen. 1484 NewClauses.push_back(CatchClause); 1485 } else { 1486 // Repeated catch clause - drop the redundant copy. 1487 MakeNewInstruction = true; 1488 } 1489 1490 // If this is a catch-all then there is no point in keeping any following 1491 // clauses or marking the landingpad as having a cleanup. 1492 if (isCatchAll(Personality, TypeInfo)) { 1493 if (!isLastClause) 1494 MakeNewInstruction = true; 1495 CleanupFlag = false; 1496 break; 1497 } 1498 } else { 1499 // A filter clause. If any of the filter elements were already caught 1500 // then they can be dropped from the filter. It is tempting to try to 1501 // exploit the filter further by saying that any typeinfo that does not 1502 // occur in the filter can't be caught later (and thus can be dropped). 1503 // However this would be wrong, since typeinfos can match without being 1504 // equal (for example if one represents a C++ class, and the other some 1505 // class derived from it). 1506 assert(LI.isFilter(i) && "Unsupported landingpad clause!"); 1507 Value *FilterClause = LI.getClause(i); 1508 ArrayType *FilterType = cast<ArrayType>(FilterClause->getType()); 1509 unsigned NumTypeInfos = FilterType->getNumElements(); 1510 1511 // An empty filter catches everything, so there is no point in keeping any 1512 // following clauses or marking the landingpad as having a cleanup. By 1513 // dealing with this case here the following code is made a bit simpler. 1514 if (!NumTypeInfos) { 1515 NewClauses.push_back(FilterClause); 1516 if (!isLastClause) 1517 MakeNewInstruction = true; 1518 CleanupFlag = false; 1519 break; 1520 } 1521 1522 bool MakeNewFilter = false; // If true, make a new filter. 1523 SmallVector<Constant *, 16> NewFilterElts; // New elements. 1524 if (isa<ConstantAggregateZero>(FilterClause)) { 1525 // Not an empty filter - it contains at least one null typeinfo. 1526 assert(NumTypeInfos > 0 && "Should have handled empty filter already!"); 1527 Constant *TypeInfo = 1528 Constant::getNullValue(FilterType->getElementType()); 1529 // If this typeinfo is a catch-all then the filter can never match. 1530 if (isCatchAll(Personality, TypeInfo)) { 1531 // Throw the filter away. 1532 MakeNewInstruction = true; 1533 continue; 1534 } 1535 1536 // There is no point in having multiple copies of this typeinfo, so 1537 // discard all but the first copy if there is more than one. 1538 NewFilterElts.push_back(TypeInfo); 1539 if (NumTypeInfos > 1) 1540 MakeNewFilter = true; 1541 } else { 1542 ConstantArray *Filter = cast<ConstantArray>(FilterClause); 1543 SmallPtrSet<Value *, 16> SeenInFilter; // For uniquing the elements. 1544 NewFilterElts.reserve(NumTypeInfos); 1545 1546 // Remove any filter elements that were already caught or that already 1547 // occurred in the filter. While there, see if any of the elements are 1548 // catch-alls. If so, the filter can be discarded. 1549 bool SawCatchAll = false; 1550 for (unsigned j = 0; j != NumTypeInfos; ++j) { 1551 Value *Elt = Filter->getOperand(j); 1552 Constant *TypeInfo = cast<Constant>(Elt->stripPointerCasts()); 1553 if (isCatchAll(Personality, TypeInfo)) { 1554 // This element is a catch-all. Bail out, noting this fact. 1555 SawCatchAll = true; 1556 break; 1557 } 1558 if (AlreadyCaught.count(TypeInfo)) 1559 // Already caught by an earlier clause, so having it in the filter 1560 // is pointless. 
1561 continue; 1562 // There is no point in having multiple copies of the same typeinfo in 1563 // a filter, so only add it if we didn't already. 1564 if (SeenInFilter.insert(TypeInfo)) 1565 NewFilterElts.push_back(cast<Constant>(Elt)); 1566 } 1567 // A filter containing a catch-all cannot match anything by definition. 1568 if (SawCatchAll) { 1569 // Throw the filter away. 1570 MakeNewInstruction = true; 1571 continue; 1572 } 1573 1574 // If we dropped something from the filter, make a new one. 1575 if (NewFilterElts.size() < NumTypeInfos) 1576 MakeNewFilter = true; 1577 } 1578 if (MakeNewFilter) { 1579 FilterType = ArrayType::get(FilterType->getElementType(), 1580 NewFilterElts.size()); 1581 FilterClause = ConstantArray::get(FilterType, NewFilterElts); 1582 MakeNewInstruction = true; 1583 } 1584 1585 NewClauses.push_back(FilterClause); 1586 1587 // If the new filter is empty then it will catch everything so there is 1588 // no point in keeping any following clauses or marking the landingpad 1589 // as having a cleanup. The case of the original filter being empty was 1590 // already handled above. 1591 if (MakeNewFilter && !NewFilterElts.size()) { 1592 assert(MakeNewInstruction && "New filter but not a new instruction!"); 1593 CleanupFlag = false; 1594 break; 1595 } 1596 } 1597 } 1598 1599 // If several filters occur in a row then reorder them so that the shortest 1600 // filters come first (those with the smallest number of elements). This is 1601 // advantageous because shorter filters are more likely to match, speeding up 1602 // unwinding, but mostly because it increases the effectiveness of the other 1603 // filter optimizations below. 1604 for (unsigned i = 0, e = NewClauses.size(); i + 1 < e; ) { 1605 unsigned j; 1606 // Find the maximal 'j' s.t. the range [i, j) consists entirely of filters. 1607 for (j = i; j != e; ++j) 1608 if (!isa<ArrayType>(NewClauses[j]->getType())) 1609 break; 1610 1611 // Check whether the filters are already sorted by length. We need to know 1612 // if sorting them is actually going to do anything so that we only make a 1613 // new landingpad instruction if it does. 1614 for (unsigned k = i; k + 1 < j; ++k) 1615 if (shorter_filter(NewClauses[k+1], NewClauses[k])) { 1616 // Not sorted, so sort the filters now. Doing an unstable sort would be 1617 // correct too but reordering filters pointlessly might confuse users. 1618 std::stable_sort(NewClauses.begin() + i, NewClauses.begin() + j, 1619 shorter_filter); 1620 MakeNewInstruction = true; 1621 break; 1622 } 1623 1624 // Look for the next batch of filters. 1625 i = j + 1; 1626 } 1627 1628 // If typeinfos matched if and only if equal, then the elements of a filter L 1629 // that occurs later than a filter F could be replaced by the intersection of 1630 // the elements of F and L. In reality two typeinfos can match without being 1631 // equal (for example if one represents a C++ class, and the other some class 1632 // derived from it) so it would be wrong to perform this transform in general. 1633 // However the transform is correct and useful if F is a subset of L. In that 1634 // case L can be replaced by F, and thus removed altogether since repeating a 1635 // filter is pointless. So here we look at all pairs of filters F and L where 1636 // L follows F in the list of clauses, and remove L if every element of F is 1637 // an element of L. This can occur when inlining C++ functions with exception 1638 // specifications. 
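  // For example (hypothetical typeinfo globals), given the clause list
  //   filter [1 x i8*] [i8* @typeid_A]
  //   filter [2 x i8*] [i8* @typeid_A, i8* @typeid_B]
  // every element of the first filter also appears in the second, so the
  // second filter is redundant and can be removed.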
1639 for (unsigned i = 0; i + 1 < NewClauses.size(); ++i) { 1640 // Examine each filter in turn. 1641 Value *Filter = NewClauses[i]; 1642 ArrayType *FTy = dyn_cast<ArrayType>(Filter->getType()); 1643 if (!FTy) 1644 // Not a filter - skip it. 1645 continue; 1646 unsigned FElts = FTy->getNumElements(); 1647 // Examine each filter following this one. Doing this backwards means that 1648 // we don't have to worry about filters disappearing under us when removed. 1649 for (unsigned j = NewClauses.size() - 1; j != i; --j) { 1650 Value *LFilter = NewClauses[j]; 1651 ArrayType *LTy = dyn_cast<ArrayType>(LFilter->getType()); 1652 if (!LTy) 1653 // Not a filter - skip it. 1654 continue; 1655 // If Filter is a subset of LFilter, i.e. every element of Filter is also 1656 // an element of LFilter, then discard LFilter. 1657 SmallVector<Value *, 16>::iterator J = NewClauses.begin() + j; 1658 // If Filter is empty then it is a subset of LFilter. 1659 if (!FElts) { 1660 // Discard LFilter. 1661 NewClauses.erase(J); 1662 MakeNewInstruction = true; 1663 // Move on to the next filter. 1664 continue; 1665 } 1666 unsigned LElts = LTy->getNumElements(); 1667 // If Filter is longer than LFilter then it cannot be a subset of it. 1668 if (FElts > LElts) 1669 // Move on to the next filter. 1670 continue; 1671 // At this point we know that LFilter has at least one element. 1672 if (isa<ConstantAggregateZero>(LFilter)) { // LFilter only contains zeros. 1673 // Filter is a subset of LFilter iff Filter contains only zeros (as we 1674 // already know that Filter is not longer than LFilter). 1675 if (isa<ConstantAggregateZero>(Filter)) { 1676 assert(FElts <= LElts && "Should have handled this case earlier!"); 1677 // Discard LFilter. 1678 NewClauses.erase(J); 1679 MakeNewInstruction = true; 1680 } 1681 // Move on to the next filter. 1682 continue; 1683 } 1684 ConstantArray *LArray = cast<ConstantArray>(LFilter); 1685 if (isa<ConstantAggregateZero>(Filter)) { // Filter only contains zeros. 1686 // Since Filter is non-empty and contains only zeros, it is a subset of 1687 // LFilter iff LFilter contains a zero. 1688 assert(FElts > 0 && "Should have eliminated the empty filter earlier!"); 1689 for (unsigned l = 0; l != LElts; ++l) 1690 if (LArray->getOperand(l)->isNullValue()) { 1691 // LFilter contains a zero - discard it. 1692 NewClauses.erase(J); 1693 MakeNewInstruction = true; 1694 break; 1695 } 1696 // Move on to the next filter. 1697 continue; 1698 } 1699 // At this point we know that both filters are ConstantArrays. Loop over 1700 // operands to see whether every element of Filter is also an element of 1701 // LFilter. Since filters tend to be short this is probably faster than 1702 // using a method that scales nicely. 1703 ConstantArray *FArray = cast<ConstantArray>(Filter); 1704 bool AllFound = true; 1705 for (unsigned f = 0; f != FElts; ++f) { 1706 Value *FTypeInfo = FArray->getOperand(f)->stripPointerCasts(); 1707 AllFound = false; 1708 for (unsigned l = 0; l != LElts; ++l) { 1709 Value *LTypeInfo = LArray->getOperand(l)->stripPointerCasts(); 1710 if (LTypeInfo == FTypeInfo) { 1711 AllFound = true; 1712 break; 1713 } 1714 } 1715 if (!AllFound) 1716 break; 1717 } 1718 if (AllFound) { 1719 // Discard LFilter. 1720 NewClauses.erase(J); 1721 MakeNewInstruction = true; 1722 } 1723 // Move on to the next filter. 1724 } 1725 } 1726 1727 // If we changed any of the clauses, replace the old landingpad instruction 1728 // with a new one. 
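  // MakeNewInstruction was set above whenever a clause was dropped,
  // deduplicated, shortened or reordered, so the landingpad is only rebuilt
  // when something actually changed.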
1729 if (MakeNewInstruction) { 1730 LandingPadInst *NLI = LandingPadInst::Create(LI.getType(), 1731 LI.getPersonalityFn(), 1732 NewClauses.size()); 1733 for (unsigned i = 0, e = NewClauses.size(); i != e; ++i) 1734 NLI->addClause(NewClauses[i]); 1735 // A landing pad with no clauses must have the cleanup flag set. It is 1736 // theoretically possible, though highly unlikely, that we eliminated all 1737 // clauses. If so, force the cleanup flag to true. 1738 if (NewClauses.empty()) 1739 CleanupFlag = true; 1740 NLI->setCleanup(CleanupFlag); 1741 return NLI; 1742 } 1743 1744 // Even if none of the clauses changed, we may nonetheless have understood 1745 // that the cleanup flag is pointless. Clear it if so. 1746 if (LI.isCleanup() != CleanupFlag) { 1747 assert(!CleanupFlag && "Adding a cleanup, not removing one?!"); 1748 LI.setCleanup(CleanupFlag); 1749 return &LI; 1750 } 1751 1752 return 0; 1753 } 1754 1755 1756 1757 1758 /// TryToSinkInstruction - Try to move the specified instruction from its 1759 /// current block into the beginning of DestBlock, which can only happen if it's 1760 /// safe to move the instruction past all of the instructions between it and the 1761 /// end of its block. 1762 static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) { 1763 assert(I->hasOneUse() && "Invariants didn't hold!"); 1764 1765 // Cannot move control-flow-involving, volatile loads, vaarg, etc. 1766 if (isa<PHINode>(I) || isa<LandingPadInst>(I) || I->mayHaveSideEffects() || 1767 isa<TerminatorInst>(I)) 1768 return false; 1769 1770 // Do not sink alloca instructions out of the entry block. 1771 if (isa<AllocaInst>(I) && I->getParent() == 1772 &DestBlock->getParent()->getEntryBlock()) 1773 return false; 1774 1775 // We can only sink load instructions if there is nothing between the load and 1776 // the end of block that could change the value. 1777 if (I->mayReadFromMemory()) { 1778 for (BasicBlock::iterator Scan = I, E = I->getParent()->end(); 1779 Scan != E; ++Scan) 1780 if (Scan->mayWriteToMemory()) 1781 return false; 1782 } 1783 1784 BasicBlock::iterator InsertPos = DestBlock->getFirstInsertionPt(); 1785 I->moveBefore(InsertPos); 1786 ++NumSunkInst; 1787 return true; 1788 } 1789 1790 1791 /// AddReachableCodeToWorklist - Walk the function in depth-first order, adding 1792 /// all reachable code to the worklist. 1793 /// 1794 /// This has a couple of tricks to make the code faster and more powerful. In 1795 /// particular, we constant fold and DCE instructions as we go, to avoid adding 1796 /// them to the worklist (this significantly speeds up instcombine on code where 1797 /// many instructions are dead or constant). Additionally, if we find a branch 1798 /// whose condition is a known constant, we only visit the reachable successors. 1799 /// 1800 static bool AddReachableCodeToWorklist(BasicBlock *BB, 1801 SmallPtrSet<BasicBlock*, 64> &Visited, 1802 InstCombiner &IC, 1803 const TargetData *TD) { 1804 bool MadeIRChange = false; 1805 SmallVector<BasicBlock*, 256> Worklist; 1806 Worklist.push_back(BB); 1807 1808 SmallVector<Instruction*, 128> InstrsForInstCombineWorklist; 1809 DenseMap<ConstantExpr*, Constant*> FoldedConstants; 1810 1811 do { 1812 BB = Worklist.pop_back_val(); 1813 1814 // We have now visited this block! If we've already been here, ignore it. 1815 if (!Visited.insert(BB)) continue; 1816 1817 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) { 1818 Instruction *Inst = BBI++; 1819 1820 // DCE instruction if trivially dead. 
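      // (Trivially dead means the instruction has no users and no side
      // effects, so removing it cannot change the program's behaviour.)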
1821 if (isInstructionTriviallyDead(Inst)) { 1822 ++NumDeadInst; 1823 DEBUG(errs() << "IC: DCE: " << *Inst << '\n'); 1824 Inst->eraseFromParent(); 1825 continue; 1826 } 1827 1828 // ConstantProp instruction if trivially constant. 1829 if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0))) 1830 if (Constant *C = ConstantFoldInstruction(Inst, TD)) { 1831 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " 1832 << *Inst << '\n'); 1833 Inst->replaceAllUsesWith(C); 1834 ++NumConstProp; 1835 Inst->eraseFromParent(); 1836 continue; 1837 } 1838 1839 if (TD) { 1840 // See if we can constant fold its operands. 1841 for (User::op_iterator i = Inst->op_begin(), e = Inst->op_end(); 1842 i != e; ++i) { 1843 ConstantExpr *CE = dyn_cast<ConstantExpr>(i); 1844 if (CE == 0) continue; 1845 1846 Constant*& FoldRes = FoldedConstants[CE]; 1847 if (!FoldRes) 1848 FoldRes = ConstantFoldConstantExpression(CE, TD); 1849 if (!FoldRes) 1850 FoldRes = CE; 1851 1852 if (FoldRes != CE) { 1853 *i = FoldRes; 1854 MadeIRChange = true; 1855 } 1856 } 1857 } 1858 1859 InstrsForInstCombineWorklist.push_back(Inst); 1860 } 1861 1862 // Recursively visit successors. If this is a branch or switch on a 1863 // constant, only visit the reachable successor. 1864 TerminatorInst *TI = BB->getTerminator(); 1865 if (BranchInst *BI = dyn_cast<BranchInst>(TI)) { 1866 if (BI->isConditional() && isa<ConstantInt>(BI->getCondition())) { 1867 bool CondVal = cast<ConstantInt>(BI->getCondition())->getZExtValue(); 1868 BasicBlock *ReachableBB = BI->getSuccessor(!CondVal); 1869 Worklist.push_back(ReachableBB); 1870 continue; 1871 } 1872 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) { 1873 if (ConstantInt *Cond = dyn_cast<ConstantInt>(SI->getCondition())) { 1874 // See if this is an explicit destination. 1875 for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) 1876 if (SI->getCaseValue(i) == Cond) { 1877 BasicBlock *ReachableBB = SI->getSuccessor(i); 1878 Worklist.push_back(ReachableBB); 1879 continue; 1880 } 1881 1882 // Otherwise it is the default destination. 1883 Worklist.push_back(SI->getSuccessor(0)); 1884 continue; 1885 } 1886 } 1887 1888 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) 1889 Worklist.push_back(TI->getSuccessor(i)); 1890 } while (!Worklist.empty()); 1891 1892 // Once we've found all of the instructions to add to instcombine's worklist, 1893 // add them in reverse order. This way instcombine will visit from the top 1894 // of the function down. This jives well with the way that it adds all uses 1895 // of instructions to the worklist after doing a transformation, thus avoiding 1896 // some N^2 behavior in pathological cases. 1897 IC.Worklist.AddInitialGroup(&InstrsForInstCombineWorklist[0], 1898 InstrsForInstCombineWorklist.size()); 1899 1900 return MadeIRChange; 1901 } 1902 1903 bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) { 1904 MadeIRChange = false; 1905 1906 DEBUG(errs() << "\n\nINSTCOMBINE ITERATION #" << Iteration << " on " 1907 << F.getName() << "\n"); 1908 1909 { 1910 // Do a depth-first traversal of the function, populate the worklist with 1911 // the reachable instructions. Ignore blocks that are not reachable. Keep 1912 // track of which blocks we visit. 1913 SmallPtrSet<BasicBlock*, 64> Visited; 1914 MadeIRChange |= AddReachableCodeToWorklist(F.begin(), Visited, *this, TD); 1915 1916 // Do a quick scan over the function. If we find any blocks that are 1917 // unreachable, remove any instructions inside of them. 
This prevents 1918 // the instcombine code from having to deal with some bad special cases. 1919 for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) { 1920 if (Visited.count(BB)) continue; 1921 1922 // Delete the instructions backwards, as it has a reduced likelihood of 1923 // having to update as many def-use and use-def chains. 1924 Instruction *EndInst = BB->getTerminator(); // Last not to be deleted. 1925 while (EndInst != BB->begin()) { 1926 // Delete the next to last instruction. 1927 BasicBlock::iterator I = EndInst; 1928 Instruction *Inst = --I; 1929 if (!Inst->use_empty()) 1930 Inst->replaceAllUsesWith(UndefValue::get(Inst->getType())); 1931 if (isa<LandingPadInst>(Inst)) { 1932 EndInst = Inst; 1933 continue; 1934 } 1935 if (!isa<DbgInfoIntrinsic>(Inst)) { 1936 ++NumDeadInst; 1937 MadeIRChange = true; 1938 } 1939 Inst->eraseFromParent(); 1940 } 1941 } 1942 } 1943 1944 while (!Worklist.isEmpty()) { 1945 Instruction *I = Worklist.RemoveOne(); 1946 if (I == 0) continue; // skip null values. 1947 1948 // Check to see if we can DCE the instruction. 1949 if (isInstructionTriviallyDead(I)) { 1950 DEBUG(errs() << "IC: DCE: " << *I << '\n'); 1951 EraseInstFromFunction(*I); 1952 ++NumDeadInst; 1953 MadeIRChange = true; 1954 continue; 1955 } 1956 1957 // Instruction isn't dead, see if we can constant propagate it. 1958 if (!I->use_empty() && isa<Constant>(I->getOperand(0))) 1959 if (Constant *C = ConstantFoldInstruction(I, TD)) { 1960 DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n'); 1961 1962 // Add operands to the worklist. 1963 ReplaceInstUsesWith(*I, C); 1964 ++NumConstProp; 1965 EraseInstFromFunction(*I); 1966 MadeIRChange = true; 1967 continue; 1968 } 1969 1970 // See if we can trivially sink this instruction to a successor basic block. 1971 if (I->hasOneUse()) { 1972 BasicBlock *BB = I->getParent(); 1973 Instruction *UserInst = cast<Instruction>(I->use_back()); 1974 BasicBlock *UserParent; 1975 1976 // Get the block the use occurs in. 1977 if (PHINode *PN = dyn_cast<PHINode>(UserInst)) 1978 UserParent = PN->getIncomingBlock(I->use_begin().getUse()); 1979 else 1980 UserParent = UserInst->getParent(); 1981 1982 if (UserParent != BB) { 1983 bool UserIsSuccessor = false; 1984 // See if the user is one of our successors. 1985 for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI) 1986 if (*SI == UserParent) { 1987 UserIsSuccessor = true; 1988 break; 1989 } 1990 1991 // If the user is one of our immediate successors, and if that successor 1992 // only has us as a predecessors (we'd have to split the critical edge 1993 // otherwise), we can keep going. 1994 if (UserIsSuccessor && UserParent->getSinglePredecessor()) 1995 // Okay, the CFG is simple enough, try to sink this instruction. 1996 MadeIRChange |= TryToSinkInstruction(I, UserParent); 1997 } 1998 } 1999 2000 // Now that we have an instruction, try combining it to simplify it. 2001 Builder->SetInsertPoint(I->getParent(), I); 2002 Builder->SetCurrentDebugLocation(I->getDebugLoc()); 2003 2004 #ifndef NDEBUG 2005 std::string OrigI; 2006 #endif 2007 DEBUG(raw_string_ostream SS(OrigI); I->print(SS); OrigI = SS.str();); 2008 DEBUG(errs() << "IC: Visiting: " << OrigI << '\n'); 2009 2010 if (Instruction *Result = visit(*I)) { 2011 ++NumCombined; 2012 // Should we replace the old instruction with a new one? 
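      // visit() either returned a brand new instruction to stand in for I, or
      // returned I itself to signal that I was updated in place.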
2013 if (Result != I) { 2014 DEBUG(errs() << "IC: Old = " << *I << '\n' 2015 << " New = " << *Result << '\n'); 2016 2017 if (!I->getDebugLoc().isUnknown()) 2018 Result->setDebugLoc(I->getDebugLoc()); 2019 // Everything uses the new instruction now. 2020 I->replaceAllUsesWith(Result); 2021 2022 // Move the name to the new instruction first. 2023 Result->takeName(I); 2024 2025 // Push the new instruction and any users onto the worklist. 2026 Worklist.Add(Result); 2027 Worklist.AddUsersToWorkList(*Result); 2028 2029 // Insert the new instruction into the basic block... 2030 BasicBlock *InstParent = I->getParent(); 2031 BasicBlock::iterator InsertPos = I; 2032 2033 // If we replace a PHI with something that isn't a PHI, fix up the 2034 // insertion point. 2035 if (!isa<PHINode>(Result) && isa<PHINode>(InsertPos)) 2036 InsertPos = InstParent->getFirstInsertionPt(); 2037 2038 InstParent->getInstList().insert(InsertPos, Result); 2039 2040 EraseInstFromFunction(*I); 2041 } else { 2042 #ifndef NDEBUG 2043 DEBUG(errs() << "IC: Mod = " << OrigI << '\n' 2044 << " New = " << *I << '\n'); 2045 #endif 2046 2047 // If the instruction was modified, it's possible that it is now dead. 2048 // if so, remove it. 2049 if (isInstructionTriviallyDead(I)) { 2050 EraseInstFromFunction(*I); 2051 } else { 2052 Worklist.Add(I); 2053 Worklist.AddUsersToWorkList(*I); 2054 } 2055 } 2056 MadeIRChange = true; 2057 } 2058 } 2059 2060 Worklist.Zap(); 2061 return MadeIRChange; 2062 } 2063 2064 2065 bool InstCombiner::runOnFunction(Function &F) { 2066 TD = getAnalysisIfAvailable<TargetData>(); 2067 2068 2069 /// Builder - This is an IRBuilder that automatically inserts new 2070 /// instructions into the worklist when they are created. 2071 IRBuilder<true, TargetFolder, InstCombineIRInserter> 2072 TheBuilder(F.getContext(), TargetFolder(TD), 2073 InstCombineIRInserter(Worklist)); 2074 Builder = &TheBuilder; 2075 2076 bool EverMadeChange = false; 2077 2078 // Lower dbg.declare intrinsics otherwise their value may be clobbered 2079 // by instcombiner. 2080 EverMadeChange = LowerDbgDeclare(F); 2081 2082 // Iterate while there is work to do. 2083 unsigned Iteration = 0; 2084 while (DoOneIteration(F, Iteration++)) 2085 EverMadeChange = true; 2086 2087 Builder = 0; 2088 return EverMadeChange; 2089 } 2090 2091 FunctionPass *llvm::createInstructionCombiningPass() { 2092 return new InstCombiner(); 2093 } 2094