//===--- ExpandMemCmp.cpp - Expand memcmp() to load/stores ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass tries to expand memcmp() calls into optimally-sized loads and
// compares for the target.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/InitializePasses.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SizeOpts.h"

using namespace llvm;

#define DEBUG_TYPE "expandmemcmp"

STATISTIC(NumMemCmpCalls, "Number of memcmp calls");
STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size");
STATISTIC(NumMemCmpGreaterThanMax,
          "Number of memcmp calls with size greater than max size");
STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");

static cl::opt<unsigned> MemCmpEqZeroNumLoadsPerBlock(
    "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
    cl::desc("The number of loads per basic block for inline expansion of "
             "memcmp that is only being compared against zero."));

static cl::opt<unsigned> MaxLoadsPerMemcmp(
    "max-loads-per-memcmp", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp"));

static cl::opt<unsigned> MaxLoadsPerMemcmpOptSize(
    "max-loads-per-memcmp-opt-size", cl::Hidden,
    cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"));

namespace {

// This class provides helper functions to expand a memcmp library call into an
// inline expansion.
class MemCmpExpansion {
  struct ResultBlock {
    BasicBlock *BB = nullptr;
    PHINode *PhiSrc1 = nullptr;
    PHINode *PhiSrc2 = nullptr;

    ResultBlock() = default;
  };

  CallInst *const CI;
  ResultBlock ResBlock;
  const uint64_t Size;
  unsigned MaxLoadSize;
  uint64_t NumLoadsNonOneByte;
  const uint64_t NumLoadsPerBlockForZeroCmp;
  std::vector<BasicBlock *> LoadCmpBlocks;
  BasicBlock *EndBlock;
  PHINode *PhiRes;
  const bool IsUsedForZeroCmp;
  const DataLayout &DL;
  IRBuilder<> Builder;
  // Represents the decomposition in blocks of the expansion. For example,
  // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
  // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}].
  struct LoadEntry {
    LoadEntry(unsigned LoadSize, uint64_t Offset)
        : LoadSize(LoadSize), Offset(Offset) {
    }

    // The size of the load for this block, in bytes.
    unsigned LoadSize;
    // The offset of this load from the base pointer, in bytes.
    uint64_t Offset;
  };
  using LoadEntryVector = SmallVector<LoadEntry, 8>;
  LoadEntryVector LoadSequence;

  void createLoadCmpBlocks();
  void createResultBlock();
  void setupResultBlockPHINodes();
  void setupEndBlockPHINodes();
  Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex);
  void emitLoadCompareBlock(unsigned BlockIndex);
  void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                         unsigned &LoadIndex);
  void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
  void emitMemCmpResultBlock();
  Value *getMemCmpExpansionZeroCase();
  Value *getMemCmpEqZeroOneBlock();
  Value *getMemCmpOneBlock();
  struct LoadPair {
    Value *Lhs = nullptr;
    Value *Rhs = nullptr;
  };
  LoadPair getLoadPair(Type *LoadSizeType, bool NeedsBSwap, Type *CmpSizeType,
                       unsigned OffsetBytes);

  static LoadEntryVector
  computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
                            unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte);
  static LoadEntryVector
  computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
                                 unsigned MaxNumLoads,
                                 unsigned &NumLoadsNonOneByte);

public:
  MemCmpExpansion(CallInst *CI, uint64_t Size,
                  const TargetTransformInfo::MemCmpExpansionOptions &Options,
                  const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout);

  unsigned getNumBlocks();
  uint64_t getNumLoads() const { return LoadSequence.size(); }

  Value *getMemCmpExpansion();
};

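// For example, with Size == 7 and LoadSizes == {8, 4, 2, 1}, the greedy
// decomposition is [{4, 0}, {2, 4}, {1, 6}]: a 4-byte load at offset 0, a
// 2-byte load at offset 4 and a 1-byte load at offset 6, with
// NumLoadsNonOneByte == 2 (assuming MaxNumLoads allows at least 3 loads).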
MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
    uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
    const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
  NumLoadsNonOneByte = 0;
  LoadEntryVector LoadSequence;
  uint64_t Offset = 0;
  while (Size && !LoadSizes.empty()) {
    const unsigned LoadSize = LoadSizes.front();
    const uint64_t NumLoadsForThisSize = Size / LoadSize;
    if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
      // Do not expand if the total number of loads is larger than what the
      // target allows. Note that it's important that we exit before completing
      // the expansion to avoid using a ton of memory to store the expansion for
      // large sizes.
      return {};
    }
    if (NumLoadsForThisSize > 0) {
      for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
        LoadSequence.push_back({LoadSize, Offset});
        Offset += LoadSize;
      }
      if (LoadSize > 1)
        ++NumLoadsNonOneByte;
      Size = Size % LoadSize;
    }
    LoadSizes = LoadSizes.drop_front();
  }
  return LoadSequence;
}

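// For example, comparing 33 bytes with MaxLoadSize == 16 uses two
// non-overlapping 16-byte loads at offsets 0 and 16 plus a final 16-byte load
// at offset 17 that overlaps the previous one and covers the trailing byte:
// [{16, 0}, {16, 16}, {16, 17}] (assuming MaxNumLoads allows 3 loads).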
MemCmpExpansion::LoadEntryVector
MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
                                                const unsigned MaxLoadSize,
                                                const unsigned MaxNumLoads,
                                                unsigned &NumLoadsNonOneByte) {
  // These are already handled by the greedy approach.
  if (Size < 2 || MaxLoadSize < 2)
    return {};

  // We try to do as many non-overlapping loads as possible starting from the
  // beginning.
  const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
  assert(NumNonOverlappingLoads && "there must be at least one load");
  // There remain 0 to (MaxLoadSize - 1) bytes to load; this will be done with
  // an overlapping load.
  Size = Size - NumNonOverlappingLoads * MaxLoadSize;
  // Bail if we do not need an overlapping load; this is already handled by
  // the greedy approach.
  if (Size == 0)
    return {};
  // Bail if the number of loads (non-overlapping + potential overlapping one)
  // is larger than the max allowed.
  if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
    return {};

  // Add non-overlapping loads.
  LoadEntryVector LoadSequence;
  uint64_t Offset = 0;
  for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
    LoadSequence.push_back({MaxLoadSize, Offset});
    Offset += MaxLoadSize;
  }

  // Add the last overlapping load.
  assert(Size > 0 && Size < MaxLoadSize && "broken invariant");
  LoadSequence.push_back({MaxLoadSize, Offset - (MaxLoadSize - Size)});
  NumLoadsNonOneByte = 1;
  return LoadSequence;
}

// Initialize the basic block structure required for expansion of a memcmp call
// with the given maximum load size and memcmp size parameter.
// This structure includes:
// 1. A list of load compare blocks - LoadCmpBlocks.
// 2. An EndBlock, split from the original instruction point, which is the
//    block to return from.
// 3. ResultBlock, the block to branch to for early exit when a LoadCmpBlock
//    finds a difference.
MemCmpExpansion::MemCmpExpansion(
    CallInst *const CI, uint64_t Size,
    const TargetTransformInfo::MemCmpExpansionOptions &Options,
    const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout)
    : CI(CI), Size(Size), MaxLoadSize(0), NumLoadsNonOneByte(0),
      NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
      IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), Builder(CI) {
  assert(Size > 0 && "zero blocks");
  // Scale the max size down if the target can load more bytes than we need.
  llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
  while (!LoadSizes.empty() && LoadSizes.front() > Size) {
    LoadSizes = LoadSizes.drop_front();
  }
  assert(!LoadSizes.empty() && "cannot load Size bytes");
  MaxLoadSize = LoadSizes.front();
  // Compute the decomposition.
  unsigned GreedyNumLoadsNonOneByte = 0;
  LoadSequence = computeGreedyLoadSequence(Size, LoadSizes, Options.MaxNumLoads,
                                           GreedyNumLoadsNonOneByte);
  NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
  // If we allow overlapping loads and the load sequence is not already optimal,
  // use overlapping loads.
  if (Options.AllowOverlappingLoads &&
      (LoadSequence.empty() || LoadSequence.size() > 2)) {
    unsigned OverlappingNumLoadsNonOneByte = 0;
    auto OverlappingLoads = computeOverlappingLoadSequence(
        Size, MaxLoadSize, Options.MaxNumLoads, OverlappingNumLoadsNonOneByte);
    if (!OverlappingLoads.empty() &&
        (LoadSequence.empty() ||
         OverlappingLoads.size() < LoadSequence.size())) {
      LoadSequence = OverlappingLoads;
      NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
    }
  }
  assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
}

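// Returns the number of load/compare basic blocks used by the expansion. For
// a zero-equality comparison, loads are grouped NumLoadsPerBlockForZeroCmp per
// block (e.g. 5 loads with 2 loads per block yield 3 blocks); otherwise there
// is one load per block.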
unsigned MemCmpExpansion::getNumBlocks() {
  if (IsUsedForZeroCmp)
    return getNumLoads() / NumLoadsPerBlockForZeroCmp +
           (getNumLoads() % NumLoadsPerBlockForZeroCmp != 0 ? 1 : 0);
  return getNumLoads();
}

void MemCmpExpansion::createLoadCmpBlocks() {
  for (unsigned i = 0; i < getNumBlocks(); i++) {
    BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
                                        EndBlock->getParent(), EndBlock);
    LoadCmpBlocks.push_back(BB);
  }
}

void MemCmpExpansion::createResultBlock() {
  ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
                                   EndBlock->getParent(), EndBlock);
}

MemCmpExpansion::LoadPair MemCmpExpansion::getLoadPair(Type *LoadSizeType,
                                                       bool NeedsBSwap,
                                                       Type *CmpSizeType,
                                                       unsigned OffsetBytes) {
  // Get the memory source at offset `OffsetBytes`.
  Value *LhsSource = CI->getArgOperand(0);
  Value *RhsSource = CI->getArgOperand(1);
  Align LhsAlign = LhsSource->getPointerAlignment(DL).valueOrOne();
  Align RhsAlign = RhsSource->getPointerAlignment(DL).valueOrOne();
  if (OffsetBytes > 0) {
    auto *ByteType = Type::getInt8Ty(CI->getContext());
    LhsSource = Builder.CreateConstGEP1_64(
        ByteType, Builder.CreateBitCast(LhsSource, ByteType->getPointerTo()),
        OffsetBytes);
    RhsSource = Builder.CreateConstGEP1_64(
        ByteType, Builder.CreateBitCast(RhsSource, ByteType->getPointerTo()),
        OffsetBytes);
    LhsAlign = commonAlignment(LhsAlign, OffsetBytes);
    RhsAlign = commonAlignment(RhsAlign, OffsetBytes);
  }
  LhsSource = Builder.CreateBitCast(LhsSource, LoadSizeType->getPointerTo());
  RhsSource = Builder.CreateBitCast(RhsSource, LoadSizeType->getPointerTo());

  // Create a constant or a load from the source.
  Value *Lhs = nullptr;
  if (auto *C = dyn_cast<Constant>(LhsSource))
    Lhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
  if (!Lhs)
    Lhs = Builder.CreateAlignedLoad(LoadSizeType, LhsSource, LhsAlign);

  Value *Rhs = nullptr;
  if (auto *C = dyn_cast<Constant>(RhsSource))
    Rhs = ConstantFoldLoadFromConstPtr(C, LoadSizeType, DL);
  if (!Rhs)
    Rhs = Builder.CreateAlignedLoad(LoadSizeType, RhsSource, RhsAlign);

  // Swap bytes if required.
  if (NeedsBSwap) {
    Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
                                                Intrinsic::bswap, LoadSizeType);
    Lhs = Builder.CreateCall(Bswap, Lhs);
    Rhs = Builder.CreateCall(Bswap, Rhs);
  }

  // Zero extend if required.
  if (CmpSizeType != nullptr && CmpSizeType != LoadSizeType) {
    Lhs = Builder.CreateZExt(Lhs, CmpSizeType);
    Rhs = Builder.CreateZExt(Rhs, CmpSizeType);
  }
  return {Lhs, Rhs};
}

// This function creates the IR instructions for loading and comparing 1 byte.
// It loads 1 byte from each source of the memcmp parameters at the given
// offset (OffsetBytes). It then subtracts the two loaded values and feeds the
// difference into the final phi node that selects the memcmp result.
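// Roughly, the emitted sequence is (pseudo-IR):
//   %lhs  = zext(load i8 from LHS source + offset) to i32
//   %rhs  = zext(load i8 from RHS source + offset) to i32
//   %diff = sub i32 %lhs, %rhs
// with %diff feeding PhiRes.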
void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
                                               unsigned OffsetBytes) {
  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
  const LoadPair Loads =
      getLoadPair(Type::getInt8Ty(CI->getContext()), /*NeedsBSwap=*/false,
                  Type::getInt32Ty(CI->getContext()), OffsetBytes);
  Value *Diff = Builder.CreateSub(Loads.Lhs, Loads.Rhs);

  PhiRes->addIncoming(Diff, LoadCmpBlocks[BlockIndex]);

  if (BlockIndex < (LoadCmpBlocks.size() - 1)) {
    // Early exit branch to EndBlock if a difference is found. Otherwise,
    // continue to the next LoadCmpBlock.
    Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff,
                                    ConstantInt::get(Diff->getType(), 0));
    BranchInst *CmpBr =
        BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp);
    Builder.Insert(CmpBr);
  } else {
    // The last block has an unconditional branch to EndBlock.
    BranchInst *CmpBr = BranchInst::Create(EndBlock);
    Builder.Insert(CmpBr);
  }
}

/// Generate an equality comparison for one or more pairs of loaded values.
/// This is used in the case where the memcmp() call is compared equal or not
/// equal to zero.
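/// For example, with three loads per block the emitted comparison is
/// equivalent to
///   or(or(xor(Lhs0, Rhs0), xor(Lhs1, Rhs1)), xor(Lhs2, Rhs2)) != 0
/// using the pairwise-or reduction below.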
Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex,
                                            unsigned &LoadIndex) {
  assert(LoadIndex < getNumLoads() &&
         "getCompareLoadPairs() called with no remaining loads");
  std::vector<Value *> XorList, OrList;
  Value *Diff = nullptr;

  const unsigned NumLoads =
      std::min(getNumLoads() - LoadIndex, NumLoadsPerBlockForZeroCmp);

  // For a single-block expansion, start inserting before the memcmp call.
  if (LoadCmpBlocks.empty())
    Builder.SetInsertPoint(CI);
  else
    Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);

  Value *Cmp = nullptr;
  // If we have multiple loads per block, we need to generate a composite
  // comparison using xor+or. The type for the combinations is the largest load
  // type.
  IntegerType *const MaxLoadType =
      NumLoads == 1 ? nullptr
                    : IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) {
    const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex];
    const LoadPair Loads = getLoadPair(
        IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8),
        /*NeedsBSwap=*/false, MaxLoadType, CurLoadEntry.Offset);

    if (NumLoads != 1) {
      // If we have multiple loads per block, we need to generate a composite
      // comparison using xor+or.
      Diff = Builder.CreateXor(Loads.Lhs, Loads.Rhs);
      Diff = Builder.CreateZExt(Diff, MaxLoadType);
      XorList.push_back(Diff);
    } else {
      // If there's only one load per block, we just compare the loaded values.
      Cmp = Builder.CreateICmpNE(Loads.Lhs, Loads.Rhs);
    }
  }

  auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> {
    std::vector<Value *> OutList;
    for (unsigned i = 0; i < InList.size() - 1; i = i + 2) {
      Value *Or = Builder.CreateOr(InList[i], InList[i + 1]);
      OutList.push_back(Or);
    }
    if (InList.size() % 2 != 0)
      OutList.push_back(InList.back());
    return OutList;
  };

  if (!Cmp) {
    // Pairwise OR the XOR results.
    OrList = pairWiseOr(XorList);

    // Pairwise OR the OR results until one result left.
    while (OrList.size() != 1) {
      OrList = pairWiseOr(OrList);
    }

    assert(Diff && "Failed to find comparison diff");
    Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0));
  }

  return Cmp;
}

void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
                                                        unsigned &LoadIndex) {
  Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex);

  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
                           ? EndBlock
                           : LoadCmpBlocks[BlockIndex + 1];
  // Early exit branch to ResultBlock if a difference is found. Otherwise,
  // continue to the next LoadCmpBlock or EndBlock.
  BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp);
  Builder.Insert(CmpBr);

  // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
  // since early exit to ResultBlock was not taken (no difference was found in
  // any of the bytes).
  if (BlockIndex == LoadCmpBlocks.size() - 1) {
    Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
  }
}

// This function creates the IR instructions for loading and comparing using
// the given LoadSize. It loads the number of bytes specified by LoadSize from
// each source of the memcmp parameters. It then compares the loaded values
// for equality. If a difference is found, it branches with an early exit to
// the ResultBlock for calculating which source was larger. Otherwise, it
// falls through to either the next LoadCmpBlock or, if this is the last
// LoadCmpBlock, the EndBlock. Loading 1 byte is handled as a special case
// through emitLoadCompareByteBlock, which simply subtracts the loaded values
// and feeds the difference into the result phi node.
void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
  // There is one load per block in this case, BlockIndex == LoadIndex.
  const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];

  if (CurLoadEntry.LoadSize == 1) {
    MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, CurLoadEntry.Offset);
    return;
  }

  Type *LoadSizeType =
      IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");

  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);

  const LoadPair Loads =
      getLoadPair(LoadSizeType, /*NeedsBSwap=*/DL.isLittleEndian(), MaxLoadType,
                  CurLoadEntry.Offset);

  // Add the loaded values to the phi nodes for calculating the memcmp result
  // only if the result is not used in a zero-equality comparison.
  if (!IsUsedForZeroCmp) {
    ResBlock.PhiSrc1->addIncoming(Loads.Lhs, LoadCmpBlocks[BlockIndex]);
    ResBlock.PhiSrc2->addIncoming(Loads.Rhs, LoadCmpBlocks[BlockIndex]);
  }

  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, Loads.Lhs, Loads.Rhs);
  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
                           ? EndBlock
                           : LoadCmpBlocks[BlockIndex + 1];
  // Early exit branch to ResultBlock if a difference is found. Otherwise,
  // continue to the next LoadCmpBlock or EndBlock.
  BranchInst *CmpBr = BranchInst::Create(NextBB, ResBlock.BB, Cmp);
  Builder.Insert(CmpBr);

  // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
  // since early exit to ResultBlock was not taken (no difference was found in
  // any of the bytes).
  if (BlockIndex == LoadCmpBlocks.size() - 1) {
    Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
    PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
  }
}

// This function populates the ResultBlock with a sequence to calculate the
// memcmp result. It compares the two loaded source values and returns -1 if
// src1 < src2 and 1 if src1 > src2.
void MemCmpExpansion::emitMemCmpResultBlock() {
  // Special case: if the memcmp result is only used in a zero-equality
  // comparison, the exact result does not need to be calculated; any non-zero
  // value will do, so simply return 1.
  if (IsUsedForZeroCmp) {
    BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
    Builder.SetInsertPoint(ResBlock.BB, InsertPt);
    Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
    PhiRes->addIncoming(Res, ResBlock.BB);
    BranchInst *NewBr = BranchInst::Create(EndBlock);
    Builder.Insert(NewBr);
    return;
  }
  BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
  Builder.SetInsertPoint(ResBlock.BB, InsertPt);

  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
                                  ResBlock.PhiSrc2);

  Value *Res =
      Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1),
                           ConstantInt::get(Builder.getInt32Ty(), 1));

  BranchInst *NewBr = BranchInst::Create(EndBlock);
  Builder.Insert(NewBr);
  PhiRes->addIncoming(Res, ResBlock.BB);
}

void MemCmpExpansion::setupResultBlockPHINodes() {
  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
  Builder.SetInsertPoint(ResBlock.BB);
  // Note: this assumes one load per block.
  ResBlock.PhiSrc1 =
      Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1");
  ResBlock.PhiSrc2 =
      Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2");
}

void MemCmpExpansion::setupEndBlockPHINodes() {
  Builder.SetInsertPoint(&EndBlock->front());
  PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
}

Value *MemCmpExpansion::getMemCmpExpansionZeroCase() {
  unsigned LoadIndex = 0;
  // This loop populates each of the LoadCmpBlocks with the IR sequence to
  // handle multiple loads per block.
  for (unsigned I = 0; I < getNumBlocks(); ++I) {
    emitLoadCompareBlockMultipleLoads(I, LoadIndex);
  }

  emitMemCmpResultBlock();
  return PhiRes;
}

/// A memcmp expansion that compares equality with 0 and only has one block of
/// load and compare can bypass the compare, branch, and phi IR that is required
/// in the general case.
Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() {
  unsigned LoadIndex = 0;
  Value *Cmp = getCompareLoadPairs(0, LoadIndex);
  assert(LoadIndex == getNumLoads() && "some entries were not consumed");
  return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext()));
}

/// A memcmp expansion that only has one block of load and compare can bypass
/// the compare, branch, and phi IR that is required in the general case.
Value *MemCmpExpansion::getMemCmpOneBlock() {
  Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8);
  bool NeedsBSwap = DL.isLittleEndian() && Size != 1;

  // The i8 and i16 cases don't need compares. We zext the loaded values and
  // subtract them to get the suitable negative, zero, or positive i32 result.
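  // For example, for Size == 2 the two (byte-swapped on little-endian) i16
  // values are zero-extended to i32 and subtracted; the sign of the i32
  // difference matches memcmp's contract.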
  if (Size < 4) {
    const LoadPair Loads =
        getLoadPair(LoadSizeType, NeedsBSwap, Builder.getInt32Ty(),
                    /*Offset*/ 0);
    return Builder.CreateSub(Loads.Lhs, Loads.Rhs);
  }

  const LoadPair Loads = getLoadPair(LoadSizeType, NeedsBSwap, LoadSizeType,
                                     /*Offset*/ 0);
  // The result of memcmp is negative, zero, or positive, so produce that by
  // subtracting 2 extended compare bits: sub (ugt, ult).
  // If a target prefers to use selects to get -1/0/1, they should be able
  // to transform this later. The inverse transform (going from selects to math)
  // may not be possible in the DAG because the selects got converted into
  // branches before we got there.
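  // For example, if Lhs < Rhs this computes zext(false) - zext(true), i.e.
  // 0 - 1 == -1; equal values give 0 - 0 == 0; and Lhs > Rhs gives 1 - 0 == 1.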
  Value *CmpUGT = Builder.CreateICmpUGT(Loads.Lhs, Loads.Rhs);
  Value *CmpULT = Builder.CreateICmpULT(Loads.Lhs, Loads.Rhs);
  Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty());
  Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty());
  return Builder.CreateSub(ZextUGT, ZextULT);
}

// This function expands the memcmp call into an inline expansion and returns
// the memcmp result.
Value *MemCmpExpansion::getMemCmpExpansion() {
  // Create the basic block framework for a multi-block expansion.
  if (getNumBlocks() != 1) {
    BasicBlock *StartBlock = CI->getParent();
    EndBlock = StartBlock->splitBasicBlock(CI, "endblock");
    setupEndBlockPHINodes();
    createResultBlock();

    // If return value of memcmp is not used in a zero equality, we need to
    // calculate which source was larger. The calculation requires the
    // two loaded source values of each load compare block.
    // These will be saved in the phi nodes created by setupResultBlockPHINodes.
    if (!IsUsedForZeroCmp) setupResultBlockPHINodes();

    // Create the number of required load compare basic blocks.
    createLoadCmpBlocks();

    // Update the terminator added by splitBasicBlock to branch to the first
    // LoadCmpBlock.
    StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]);
  }

  Builder.SetCurrentDebugLocation(CI->getDebugLoc());

  if (IsUsedForZeroCmp)
    return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock()
                               : getMemCmpExpansionZeroCase();

  if (getNumBlocks() == 1)
    return getMemCmpOneBlock();

  for (unsigned I = 0; I < getNumBlocks(); ++I) {
    emitLoadCompareBlock(I);
  }

  emitMemCmpResultBlock();
  return PhiRes;
}

// This function checks to see if an expansion of memcmp can be generated.
// It checks for a constant compare size that is less than the max inline size.
// If an expansion cannot occur, it returns false and the call is left as a
// library call. Otherwise, the library call is replaced with a new IR
// instruction sequence.
/// We want to transform:
/// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15)
/// To:
/// loadbb:
///  %0 = bitcast i32* %buffer2 to i8*
///  %1 = bitcast i32* %buffer1 to i8*
///  %2 = bitcast i8* %1 to i64*
///  %3 = bitcast i8* %0 to i64*
///  %4 = load i64, i64* %2
///  %5 = load i64, i64* %3
///  %6 = call i64 @llvm.bswap.i64(i64 %4)
///  %7 = call i64 @llvm.bswap.i64(i64 %5)
///  %8 = sub i64 %6, %7
///  %9 = icmp ne i64 %8, 0
///  br i1 %9, label %res_block, label %loadbb1
/// res_block:                                        ; preds = %loadbb2,
/// %loadbb1, %loadbb
///  %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ]
///  %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ]
///  %10 = icmp ult i64 %phi.src1, %phi.src2
///  %11 = select i1 %10, i32 -1, i32 1
///  br label %endblock
/// loadbb1:                                          ; preds = %loadbb
///  %12 = bitcast i32* %buffer2 to i8*
///  %13 = bitcast i32* %buffer1 to i8*
///  %14 = bitcast i8* %13 to i32*
///  %15 = bitcast i8* %12 to i32*
///  %16 = getelementptr i32, i32* %14, i32 2
///  %17 = getelementptr i32, i32* %15, i32 2
///  %18 = load i32, i32* %16
///  %19 = load i32, i32* %17
///  %20 = call i32 @llvm.bswap.i32(i32 %18)
///  %21 = call i32 @llvm.bswap.i32(i32 %19)
///  %22 = zext i32 %20 to i64
///  %23 = zext i32 %21 to i64
///  %24 = sub i64 %22, %23
///  %25 = icmp ne i64 %24, 0
///  br i1 %25, label %res_block, label %loadbb2
/// loadbb2:                                          ; preds = %loadbb1
///  %26 = bitcast i32* %buffer2 to i8*
///  %27 = bitcast i32* %buffer1 to i8*
///  %28 = bitcast i8* %27 to i16*
///  %29 = bitcast i8* %26 to i16*
///  %30 = getelementptr i16, i16* %28, i16 6
///  %31 = getelementptr i16, i16* %29, i16 6
///  %32 = load i16, i16* %30
///  %33 = load i16, i16* %31
///  %34 = call i16 @llvm.bswap.i16(i16 %32)
///  %35 = call i16 @llvm.bswap.i16(i16 %33)
///  %36 = zext i16 %34 to i64
///  %37 = zext i16 %35 to i64
///  %38 = sub i64 %36, %37
///  %39 = icmp ne i64 %38, 0
///  br i1 %39, label %res_block, label %loadbb3
/// loadbb3:                                          ; preds = %loadbb2
///  %40 = bitcast i32* %buffer2 to i8*
///  %41 = bitcast i32* %buffer1 to i8*
///  %42 = getelementptr i8, i8* %41, i8 14
///  %43 = getelementptr i8, i8* %40, i8 14
///  %44 = load i8, i8* %42
///  %45 = load i8, i8* %43
///  %46 = zext i8 %44 to i32
///  %47 = zext i8 %45 to i32
///  %48 = sub i32 %46, %47
///  br label %endblock
/// endblock:                                         ; preds = %res_block,
/// %loadbb3
///  %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
///  ret i32 %phi.res
static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
                         const TargetLowering *TLI, const DataLayout *DL,
                         ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  NumMemCmpCalls++;

  // Early exit from expansion if -Oz.
  if (CI->getFunction()->hasMinSize())
    return false;

  // Early exit from expansion if size is not a constant.
  ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2));
  if (!SizeCast) {
    NumMemCmpNotConstant++;
    return false;
  }
  const uint64_t SizeVal = SizeCast->getZExtValue();

  if (SizeVal == 0) {
    return false;
  }
  // TTI call to check if target would like to expand memcmp. Also, get the
  // available load sizes.
  const bool IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI);
  bool OptForSize = CI->getFunction()->hasOptSize() ||
                    llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
  auto Options = TTI->enableMemCmpExpansion(OptForSize, IsUsedForZeroCmp);
  if (!Options) return false;

  if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
    Options.NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock;

  if (OptForSize && MaxLoadsPerMemcmpOptSize.getNumOccurrences())
    Options.MaxNumLoads = MaxLoadsPerMemcmpOptSize;

  if (!OptForSize && MaxLoadsPerMemcmp.getNumOccurrences())
    Options.MaxNumLoads = MaxLoadsPerMemcmp;

  MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL);

  // Don't expand if this will require more loads than desired by the target.
  if (Expansion.getNumLoads() == 0) {
    NumMemCmpGreaterThanMax++;
    return false;
  }

  NumMemCmpInlined++;

  Value *Res = Expansion.getMemCmpExpansion();

  // Replace call with result of expansion and erase call.
  CI->replaceAllUsesWith(Res);
  CI->eraseFromParent();

  return true;
}

class ExpandMemCmpPass : public FunctionPass {
public:
  static char ID;

  ExpandMemCmpPass() : FunctionPass(ID) {
    initializeExpandMemCmpPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F)) return false;

    auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
    if (!TPC) {
      return false;
    }
    const TargetLowering* TL =
        TPC->getTM<TargetMachine>().getSubtargetImpl(F)->getTargetLowering();

    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    const TargetTransformInfo *TTI =
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    auto *BFI = (PSI && PSI->hasProfileSummary()) ?
           &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
           nullptr;
    auto PA = runImpl(F, TLI, TTI, TL, PSI, BFI);
    return !PA.areAllPreserved();
  }

private:
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    FunctionPass::getAnalysisUsage(AU);
  }

  PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
                            const TargetTransformInfo *TTI,
                            const TargetLowering* TL,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI);
  // Returns true if a change was made.
  bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
                  const TargetTransformInfo *TTI, const TargetLowering* TL,
                  const DataLayout& DL, ProfileSummaryInfo *PSI,
                  BlockFrequencyInfo *BFI);
};

bool ExpandMemCmpPass::runOnBlock(
    BasicBlock &BB, const TargetLibraryInfo *TLI,
    const TargetTransformInfo *TTI, const TargetLowering* TL,
    const DataLayout& DL, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  for (Instruction& I : BB) {
    CallInst *CI = dyn_cast<CallInst>(&I);
    if (!CI) {
      continue;
    }
    LibFunc Func;
    if (TLI->getLibFunc(*CI, Func) &&
        (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
        expandMemCmp(CI, TTI, TL, &DL, PSI, BFI)) {
      return true;
    }
  }
  return false;
}

PreservedAnalyses ExpandMemCmpPass::runImpl(
    Function &F, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI,
    const TargetLowering* TL, ProfileSummaryInfo *PSI,
    BlockFrequencyInfo *BFI) {
  const DataLayout& DL = F.getParent()->getDataLayout();
  bool MadeChanges = false;
  for (auto BBIt = F.begin(); BBIt != F.end();) {
    if (runOnBlock(*BBIt, TLI, TTI, TL, DL, PSI, BFI)) {
      MadeChanges = true;
      // If changes were made, restart the function from the beginning, since
      // the structure of the function was changed.
      BBIt = F.begin();
    } else {
      ++BBIt;
    }
  }
  if (MadeChanges)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);
  return MadeChanges ? PreservedAnalyses::none() : PreservedAnalyses::all();
}

} // namespace

char ExpandMemCmpPass::ID = 0;
INITIALIZE_PASS_BEGIN(ExpandMemCmpPass, "expandmemcmp",
                      "Expand memcmp() to load/stores", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_END(ExpandMemCmpPass, "expandmemcmp",
                    "Expand memcmp() to load/stores", false, false)

FunctionPass *llvm::createExpandMemCmpPass() {
  return new ExpandMemCmpPass();
}