1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the auto-upgrade helper functions.
11 // This is where deprecated IR intrinsics and other IR features are updated to
12 // current specifications.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/IR/AutoUpgrade.h"
17 #include "llvm/IR/CFG.h"
18 #include "llvm/IR/CallSite.h"
19 #include "llvm/IR/Constants.h"
20 #include "llvm/IR/DIBuilder.h"
21 #include "llvm/IR/DebugInfo.h"
22 #include "llvm/IR/DiagnosticInfo.h"
23 #include "llvm/IR/Function.h"
24 #include "llvm/IR/IRBuilder.h"
25 #include "llvm/IR/Instruction.h"
26 #include "llvm/IR/IntrinsicInst.h"
27 #include "llvm/IR/LLVMContext.h"
28 #include "llvm/IR/Module.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/Regex.h"
31 #include <cstring>
32 using namespace llvm;
33 
34 // Upgrade the declarations of the SSE4.1 functions whose arguments have
35 // changed their type from v4f32 to v2i64.
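// For example, the old declaration
//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
// is replaced by one taking <2 x i64> operands; the call sites are fixed up
// with bitcasts in UpgradeIntrinsicCall.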
36 static bool UpgradeSSE41Function(Function* F, Intrinsic::ID IID,
37                                  Function *&NewFn) {
38   // Check whether this is an old version of the function, which received
39   // v4f32 arguments.
40   Type *Arg0Type = F->getFunctionType()->getParamType(0);
41   if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
42     return false;
43 
  // Yes, it's old; replace it with the new version.
45   F->setName(F->getName() + ".old");
46   NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
47   return true;
48 }
49 
50 // Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
51 // arguments have changed their type from i32 to i8.
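// For example, the old declaration
//   <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32)
// is replaced by one whose last parameter is i8; the call sites get a trunc
// inserted in UpgradeIntrinsicCall.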
52 static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
53                                              Function *&NewFn) {
54   // Check that the last argument is an i32.
55   Type *LastArgType = F->getFunctionType()->getParamType(
56      F->getFunctionType()->getNumParams() - 1);
57   if (!LastArgType->isIntegerTy(32))
58     return false;
59 
60   // Move this function aside and map down.
61   F->setName(F->getName() + ".old");
62   NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
63   return true;
64 }
65 
66 static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
67   assert(F && "Illegal to upgrade a non-existent Function.");
68 
  // Quickly eliminate it if it's not a candidate.
70   StringRef Name = F->getName();
71   if (Name.size() <= 8 || !Name.startswith("llvm."))
72     return false;
73   Name = Name.substr(5); // Strip off "llvm."
74 
75   switch (Name[0]) {
76   default: break;
77   case 'a': {
78     if (Name.startswith("arm.neon.vclz")) {
79       Type* args[2] = {
80         F->arg_begin()->getType(),
81         Type::getInt1Ty(F->getContext())
82       };
83       // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
84       // the end of the name. Change name from llvm.arm.neon.vclz.* to
85       //  llvm.ctlz.*
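      // For example, llvm.arm.neon.vclz.v4i16(<4 x i16>) becomes
      // llvm.ctlz.v4i16(<4 x i16>, i1); the extra is_zero_undef operand is
      // supplied at the call site in UpgradeIntrinsicCall.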
86       FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
87       NewFn = Function::Create(fType, F->getLinkage(),
88                                "llvm.ctlz." + Name.substr(14), F->getParent());
89       return true;
90     }
91     if (Name.startswith("arm.neon.vcnt")) {
92       NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
93                                         F->arg_begin()->getType());
94       return true;
95     }
96     Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
97     if (vldRegex.match(Name)) {
98       auto fArgs = F->getFunctionType()->params();
99       SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
100       // Can't use Intrinsic::getDeclaration here as the return types might
101       // then only be structurally equal.
102       FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
103       NewFn = Function::Create(fType, F->getLinkage(),
104                                "llvm." + Name + ".p0i8", F->getParent());
105       return true;
106     }
107     Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
108     if (vstRegex.match(Name)) {
109       static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
110                                                 Intrinsic::arm_neon_vst2,
111                                                 Intrinsic::arm_neon_vst3,
112                                                 Intrinsic::arm_neon_vst4};
113 
114       static const Intrinsic::ID StoreLaneInts[] = {
115         Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
116         Intrinsic::arm_neon_vst4lane
117       };
118 
119       auto fArgs = F->getFunctionType()->params();
120       Type *Tys[] = {fArgs[0], fArgs[1]};
121       if (Name.find("lane") == StringRef::npos)
122         NewFn = Intrinsic::getDeclaration(F->getParent(),
123                                           StoreInts[fArgs.size() - 3], Tys);
124       else
125         NewFn = Intrinsic::getDeclaration(F->getParent(),
126                                           StoreLaneInts[fArgs.size() - 5], Tys);
127       return true;
128     }
129     if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
130       NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
131       return true;
132     }
133     break;
134   }
135 
136   case 'c': {
137     if (Name.startswith("ctlz.") && F->arg_size() == 1) {
138       F->setName(Name + ".old");
139       NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
140                                         F->arg_begin()->getType());
141       return true;
142     }
143     if (Name.startswith("cttz.") && F->arg_size() == 1) {
144       F->setName(Name + ".old");
145       NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
146                                         F->arg_begin()->getType());
147       return true;
148     }
149     break;
150   }
151 
152   case 'o':
153     // We only need to change the name to match the mangling including the
154     // address space.
155     if (F->arg_size() == 2 && Name.startswith("objectsize.")) {
156       Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
157       if (F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
158         F->setName(Name + ".old");
159         NewFn = Intrinsic::getDeclaration(F->getParent(),
160                                           Intrinsic::objectsize, Tys);
161         return true;
162       }
163     }
164     break;
165 
166   case 's':
167     if (Name == "stackprotectorcheck") {
168       NewFn = nullptr;
169       return true;
170     }
171 
172   case 'x': {
173     if (Name.startswith("x86.sse2.pcmpeq.") ||
174         Name.startswith("x86.sse2.pcmpgt.") ||
175         Name.startswith("x86.avx2.pcmpeq.") ||
176         Name.startswith("x86.avx2.pcmpgt.") ||
177         Name.startswith("x86.avx2.vbroadcast") ||
178         Name.startswith("x86.avx2.pbroadcast") ||
179         Name.startswith("x86.avx.vpermil.") ||
180         Name.startswith("x86.sse41.pmovsx") ||
181         Name.startswith("x86.sse41.pmovzx") ||
182         Name.startswith("x86.avx2.pmovsx") ||
183         Name.startswith("x86.avx2.pmovzx") ||
184         Name == "x86.sse2.cvtdq2pd" ||
185         Name == "x86.sse2.cvtps2pd" ||
186         Name == "x86.avx.cvtdq2.pd.256" ||
187         Name == "x86.avx.cvt.ps2.pd.256" ||
188         Name == "x86.sse2.cvttps2dq" ||
189         Name.startswith("x86.avx.cvtt.") ||
190         Name.startswith("x86.avx.vinsertf128.") ||
191         Name == "x86.avx2.vinserti128" ||
192         Name.startswith("x86.avx.vextractf128.") ||
193         Name == "x86.avx2.vextracti128" ||
194         Name.startswith("x86.avx.movnt.") ||
195         Name == "x86.sse2.storel.dq" ||
196         Name.startswith("x86.sse.storeu.") ||
197         Name.startswith("x86.sse2.storeu.") ||
198         Name.startswith("x86.avx.storeu.") ||
199         Name.startswith("x86.avx512.mask.storeu.p") ||
200         Name.startswith("x86.avx512.mask.storeu.b.") ||
201         Name.startswith("x86.avx512.mask.storeu.w.") ||
202         Name.startswith("x86.avx512.mask.storeu.d.") ||
203         Name.startswith("x86.avx512.mask.storeu.q.") ||
204         Name.startswith("x86.avx512.mask.store.p") ||
205         Name.startswith("x86.avx512.mask.store.b.") ||
206         Name.startswith("x86.avx512.mask.store.w.") ||
207         Name.startswith("x86.avx512.mask.store.d.") ||
208         Name.startswith("x86.avx512.mask.store.q.") ||
209         Name.startswith("x86.avx512.mask.loadu.p") ||
210         Name.startswith("x86.avx512.mask.loadu.b.") ||
211         Name.startswith("x86.avx512.mask.loadu.w.") ||
212         Name.startswith("x86.avx512.mask.loadu.d.") ||
213         Name.startswith("x86.avx512.mask.loadu.q.") ||
214         Name.startswith("x86.avx512.mask.load.p") ||
215         Name.startswith("x86.avx512.mask.load.b.") ||
216         Name.startswith("x86.avx512.mask.load.w.") ||
217         Name.startswith("x86.avx512.mask.load.d.") ||
218         Name.startswith("x86.avx512.mask.load.q.") ||
219         Name == "x86.sse42.crc32.64.8" ||
220         Name.startswith("x86.avx.vbroadcast.s") ||
221         Name.startswith("x86.sse2.psll.dq") ||
222         Name.startswith("x86.sse2.psrl.dq") ||
223         Name.startswith("x86.avx2.psll.dq") ||
224         Name.startswith("x86.avx2.psrl.dq") ||
225         Name == "x86.sse41.pblendw" ||
226         Name.startswith("x86.sse41.blendp") ||
227         Name.startswith("x86.avx.blend.p") ||
228         Name == "x86.avx2.pblendw" ||
229         Name.startswith("x86.avx2.pblendd.") ||
230         Name == "x86.avx2.vbroadcasti128" ||
231         Name == "x86.xop.vpcmov" ||
232         (Name.startswith("x86.xop.vpcom") && F->arg_size() == 2)) {
233       NewFn = nullptr;
234       return true;
235     }
236     // SSE4.1 ptest functions may have an old signature.
237     if (Name.startswith("x86.sse41.ptest")) {
238       if (Name == "x86.sse41.ptestc")
239         return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestc, NewFn);
240       if (Name == "x86.sse41.ptestz")
241         return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestz, NewFn);
242       if (Name == "x86.sse41.ptestnzc")
243         return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
244     }
245     // Several blend and other instructions with masks used the wrong number of
246     // bits.
247     if (Name == "x86.sse41.insertps")
248       return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
249                                               NewFn);
250     if (Name == "x86.sse41.dppd")
251       return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
252                                               NewFn);
253     if (Name == "x86.sse41.dpps")
254       return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
255                                               NewFn);
256     if (Name == "x86.sse41.mpsadbw")
257       return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
258                                               NewFn);
259     if (Name == "x86.avx.dp.ps.256")
260       return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
261                                               NewFn);
262     if (Name == "x86.avx2.mpsadbw")
263       return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
264                                               NewFn);
265 
266     // frcz.ss/sd may need to have an argument dropped
267     if (Name.startswith("x86.xop.vfrcz.ss") && F->arg_size() == 2) {
268       F->setName(Name + ".old");
269       NewFn = Intrinsic::getDeclaration(F->getParent(),
270                                         Intrinsic::x86_xop_vfrcz_ss);
271       return true;
272     }
273     if (Name.startswith("x86.xop.vfrcz.sd") && F->arg_size() == 2) {
274       F->setName(Name + ".old");
275       NewFn = Intrinsic::getDeclaration(F->getParent(),
276                                         Intrinsic::x86_xop_vfrcz_sd);
277       return true;
278     }
279     // Fix the FMA4 intrinsics to remove the 4
280     if (Name.startswith("x86.fma4.")) {
281       F->setName("llvm.x86.fma" + Name.substr(8));
282       NewFn = F;
283       return true;
284     }
285     // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
286     if (Name.startswith("x86.xop.vpermil2")) {
287       auto Params = F->getFunctionType()->params();
288       auto Idx = Params[2];
289       if (Idx->getScalarType()->isFloatingPointTy()) {
290         F->setName(Name + ".old");
291         unsigned IdxSize = Idx->getPrimitiveSizeInBits();
292         unsigned EltSize = Idx->getScalarSizeInBits();
293         Intrinsic::ID Permil2ID;
294         if (EltSize == 64 && IdxSize == 128)
295           Permil2ID = Intrinsic::x86_xop_vpermil2pd;
296         else if (EltSize == 32 && IdxSize == 128)
297           Permil2ID = Intrinsic::x86_xop_vpermil2ps;
298         else if (EltSize == 64 && IdxSize == 256)
299           Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
300         else
301           Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
302         NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
303         return true;
304       }
305     }
306     break;
307   }
308   }
309 
310   //  This may not belong here. This function is effectively being overloaded
311   //  to both detect an intrinsic which needs upgrading, and to provide the
312   //  upgraded form of the intrinsic. We should perhaps have two separate
313   //  functions for this.
314   return false;
315 }
316 
317 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
318   NewFn = nullptr;
319   bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
320   assert(F != NewFn && "Intrinsic function upgraded to the same function");
321 
322   // Upgrade intrinsic attributes.  This does not change the function.
323   if (NewFn)
324     F = NewFn;
325   if (Intrinsic::ID id = F->getIntrinsicID())
326     F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
327   return Upgraded;
328 }
329 
330 bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
331   // Nothing to do yet.
332   return false;
333 }
334 
335 // Handles upgrading SSE2 and AVX2 PSLLDQ intrinsics by converting them
336 // to byte shuffles.
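// For example, with Shift == 4 on a 128-bit vector the shuffle mask is
// <12, 13, 14, 15, 16, 17, ..., 27>: indices below 16 read zeros and the rest
// read the low 12 bytes of the (bitcast) operand, i.e. a 4-byte left shift.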
337 static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
338                                          Value *Op, unsigned Shift) {
339   Type *ResultTy = Op->getType();
340   unsigned NumElts = ResultTy->getVectorNumElements() * 8;
341 
342   // Bitcast from a 64-bit element type to a byte element type.
343   Type *VecTy = VectorType::get(Type::getInt8Ty(C), NumElts);
344   Op = Builder.CreateBitCast(Op, VecTy, "cast");
345 
346   // We'll be shuffling in zeroes.
347   Value *Res = Constant::getNullValue(VecTy);
348 
349   // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
350   // we'll just return the zero vector.
351   if (Shift < 16) {
    uint32_t Idxs[32];
353     // 256-bit version is split into two 16-byte lanes.
354     for (unsigned l = 0; l != NumElts; l += 16)
355       for (unsigned i = 0; i != 16; ++i) {
356         unsigned Idx = NumElts + i - Shift;
357         if (Idx < NumElts)
358           Idx -= NumElts - 16; // end of lane, switch operand.
359         Idxs[l + i] = Idx + l;
360       }
361 
362     Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
363   }
364 
365   // Bitcast back to a 64-bit element type.
366   return Builder.CreateBitCast(Res, ResultTy, "cast");
367 }
368 
369 // Handles upgrading SSE2 and AVX2 PSRLDQ intrinsics by converting them
370 // to byte shuffles.
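// For example, with Shift == 4 on a 128-bit vector the shuffle mask is
// <4, 5, ..., 15, 16, 17, 18, 19>: indices below 16 read the high 12 bytes of
// the (bitcast) operand and the rest read zeros, i.e. a 4-byte right shift.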
371 static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, LLVMContext &C,
372                                          Value *Op,
373                                          unsigned Shift) {
374   Type *ResultTy = Op->getType();
375   unsigned NumElts = ResultTy->getVectorNumElements() * 8;
376 
377   // Bitcast from a 64-bit element type to a byte element type.
378   Type *VecTy = VectorType::get(Type::getInt8Ty(C), NumElts);
379   Op = Builder.CreateBitCast(Op, VecTy, "cast");
380 
381   // We'll be shuffling in zeroes.
382   Value *Res = Constant::getNullValue(VecTy);
383 
384   // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
385   // we'll just return the zero vector.
386   if (Shift < 16) {
    uint32_t Idxs[32];
388     // 256-bit version is split into two 16-byte lanes.
389     for (unsigned l = 0; l != NumElts; l += 16)
390       for (unsigned i = 0; i != 16; ++i) {
391         unsigned Idx = i + Shift;
392         if (Idx >= 16)
393           Idx += NumElts - 16; // end of lane, switch operand.
394         Idxs[l + i] = Idx + l;
395       }
396 
397     Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
398   }
399 
400   // Bitcast back to a 64-bit element type.
401   return Builder.CreateBitCast(Res, ResultTy, "cast");
402 }
403 
404 static Value *UpgradeMaskedStore(IRBuilder<> &Builder, LLVMContext &C,
405                                  Value *Ptr, Value *Data, Value *Mask,
406                                  bool Aligned) {
407   // Cast the pointer to the right type.
408   Ptr = Builder.CreateBitCast(Ptr,
409                               llvm::PointerType::getUnqual(Data->getType()));
410   unsigned Align =
411     Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;
412 
413   // If the mask is all ones just emit a regular store.
414   if (const auto *C = dyn_cast<Constant>(Mask))
415     if (C->isAllOnesValue())
416       return Builder.CreateAlignedStore(Data, Ptr, Align);
417 
418   // Convert the mask from an integer type to a vector of i1.
419   unsigned NumElts = Data->getType()->getVectorNumElements();
420   llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
421                              cast<IntegerType>(Mask->getType())->getBitWidth());
422   Mask = Builder.CreateBitCast(Mask, MaskTy);
423 
  // If we have fewer than 8 elements, then the starting mask was an i8 and
425   // we need to extract down to the right number of elements.
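  // For example, an i8 mask for a <4 x double> store is bitcast to <8 x i1>
  // and the low four lanes are extracted to form the <4 x i1> store mask.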
426   if (NumElts < 8) {
    uint32_t Indices[4];
428     for (unsigned i = 0; i != NumElts; ++i)
429       Indices[i] = i;
430     Mask = Builder.CreateShuffleVector(Mask, Mask,
431                                        makeArrayRef(Indices, NumElts),
432                                        "extract");
433   }
434 
435   return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
436 }
437 
438 static Value *UpgradeMaskedLoad(IRBuilder<> &Builder, LLVMContext &C,
439                                 Value *Ptr, Value *Passthru, Value *Mask,
440                                 bool Aligned) {
441   // Cast the pointer to the right type.
442   Ptr = Builder.CreateBitCast(Ptr,
443                              llvm::PointerType::getUnqual(Passthru->getType()));
444   unsigned Align =
445     Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;
446 
  // If the mask is all ones just emit a regular load.
448   if (const auto *C = dyn_cast<Constant>(Mask))
449     if (C->isAllOnesValue())
450       return Builder.CreateAlignedLoad(Ptr, Align);
451 
452   // Convert the mask from an integer type to a vector of i1.
453   unsigned NumElts = Passthru->getType()->getVectorNumElements();
454   llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
455                              cast<IntegerType>(Mask->getType())->getBitWidth());
456   Mask = Builder.CreateBitCast(Mask, MaskTy);
457 
  // If we have fewer than 8 elements, then the starting mask was an i8 and
459   // we need to extract down to the right number of elements.
460   if (NumElts < 8) {
    uint32_t Indices[4];
462     for (unsigned i = 0; i != NumElts; ++i)
463       Indices[i] = i;
464     Mask = Builder.CreateShuffleVector(Mask, Mask,
465                                        makeArrayRef(Indices, NumElts),
466                                        "extract");
467   }
468 
469   return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
470 }
471 
// UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call to
// the upgraded intrinsic. All argument and return casting must be provided
// in order to seamlessly integrate with the existing context.
475 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
476   Function *F = CI->getCalledFunction();
477   LLVMContext &C = CI->getContext();
478   IRBuilder<> Builder(C);
479   Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
480 
481   assert(F && "Intrinsic call is not direct?");
482 
483   if (!NewFn) {
484     // Get the Function's name.
485     StringRef Name = F->getName();
486 
487     Value *Rep;
    // Upgrade packed integer vector compare intrinsics to compare
    // instructions.
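    // (For example, llvm.x86.sse2.pcmpeq.b(%a, %b) becomes
    //  sext (icmp eq <16 x i8> %a, %b) to <16 x i8>.)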
489     if (Name.startswith("llvm.x86.sse2.pcmpeq.") ||
490         Name.startswith("llvm.x86.avx2.pcmpeq.")) {
491       Rep = Builder.CreateICmpEQ(CI->getArgOperand(0), CI->getArgOperand(1),
492                                  "pcmpeq");
      // Need to sign extend since icmp returns a vector of i1.
494       Rep = Builder.CreateSExt(Rep, CI->getType(), "");
495     } else if (Name.startswith("llvm.x86.sse2.pcmpgt.") ||
496                Name.startswith("llvm.x86.avx2.pcmpgt.")) {
497       Rep = Builder.CreateICmpSGT(CI->getArgOperand(0), CI->getArgOperand(1),
498                                   "pcmpgt");
      // Need to sign extend since icmp returns a vector of i1.
500       Rep = Builder.CreateSExt(Rep, CI->getType(), "");
501     } else if (Name == "llvm.x86.sse2.cvtdq2pd" ||
502                Name == "llvm.x86.sse2.cvtps2pd" ||
503                Name == "llvm.x86.avx.cvtdq2.pd.256" ||
504                Name == "llvm.x86.avx.cvt.ps2.pd.256") {
505       // Lossless i32/float to double conversion.
506       // Extract the bottom elements if necessary and convert to double vector.
507       Value *Src = CI->getArgOperand(0);
508       VectorType *SrcTy = cast<VectorType>(Src->getType());
509       VectorType *DstTy = cast<VectorType>(CI->getType());
      Rep = Src;
511 
512       unsigned NumDstElts = DstTy->getNumElements();
513       if (NumDstElts < SrcTy->getNumElements()) {
514         assert(NumDstElts == 2 && "Unexpected vector size");
        const uint32_t ShuffleMask[2] = { 0, 1 };
516         Rep = Builder.CreateShuffleVector(Rep, UndefValue::get(SrcTy), ShuffleMask);
517       }
518 
519       bool Int2Double = (StringRef::npos != Name.find("cvtdq2"));
520       if (Int2Double)
521         Rep = Builder.CreateSIToFP(Rep, DstTy, "cvtdq2pd");
522       else
523         Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
524     } else if (Name == "llvm.x86.sse2.cvttps2dq" ||
525                Name.startswith("llvm.x86.avx.cvtt.")) {
526       // Truncation (round to zero) float/double to i32 vector conversion.
527       Value *Src = CI->getArgOperand(0);
528       VectorType *DstTy = cast<VectorType>(CI->getType());
529       Rep = Builder.CreateFPToSI(Src, DstTy, "cvtt");
530     } else if (Name.startswith("llvm.x86.avx.movnt.")) {
531       Module *M = F->getParent();
532       SmallVector<Metadata *, 1> Elts;
533       Elts.push_back(
534           ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
535       MDNode *Node = MDNode::get(C, Elts);
536 
537       Value *Arg0 = CI->getArgOperand(0);
538       Value *Arg1 = CI->getArgOperand(1);
539 
540       // Convert the type of the pointer to a pointer to the stored type.
541       Value *BC = Builder.CreateBitCast(Arg0,
542                                         PointerType::getUnqual(Arg1->getType()),
543                                         "cast");
544       StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, 32);
545       SI->setMetadata(M->getMDKindID("nontemporal"), Node);
546 
547       // Remove intrinsic.
548       CI->eraseFromParent();
549       return;
550     } else if (Name == "llvm.x86.sse2.storel.dq") {
551       Value *Arg0 = CI->getArgOperand(0);
552       Value *Arg1 = CI->getArgOperand(1);
553 
554       Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
555       Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
556       Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
557       Value *BC = Builder.CreateBitCast(Arg0,
558                                         PointerType::getUnqual(Elt->getType()),
559                                         "cast");
560       Builder.CreateAlignedStore(Elt, BC, 1);
561 
562       // Remove intrinsic.
563       CI->eraseFromParent();
564       return;
565     } else if (Name.startswith("llvm.x86.sse.storeu.") ||
566                Name.startswith("llvm.x86.sse2.storeu.") ||
567                Name.startswith("llvm.x86.avx.storeu.")) {
568       Value *Arg0 = CI->getArgOperand(0);
569       Value *Arg1 = CI->getArgOperand(1);
570 
571       Arg0 = Builder.CreateBitCast(Arg0,
572                                    PointerType::getUnqual(Arg1->getType()),
573                                    "cast");
574       Builder.CreateAlignedStore(Arg1, Arg0, 1);
575 
576       // Remove intrinsic.
577       CI->eraseFromParent();
578       return;
579     } else if (Name.startswith("llvm.x86.avx512.mask.storeu.p") ||
580                Name.startswith("llvm.x86.avx512.mask.storeu.b.") ||
581                Name.startswith("llvm.x86.avx512.mask.storeu.w.") ||
582                Name.startswith("llvm.x86.avx512.mask.storeu.d.") ||
583                Name.startswith("llvm.x86.avx512.mask.storeu.q.")) {
584       UpgradeMaskedStore(Builder, C, CI->getArgOperand(0), CI->getArgOperand(1),
585                          CI->getArgOperand(2), /*Aligned*/false);
586 
587       // Remove intrinsic.
588       CI->eraseFromParent();
589       return;
590     } else if (Name.startswith("llvm.x86.avx512.mask.store.p") ||
591                Name.startswith("llvm.x86.avx512.mask.store.b.") ||
592                Name.startswith("llvm.x86.avx512.mask.store.w.") ||
593                Name.startswith("llvm.x86.avx512.mask.store.d.") ||
594                Name.startswith("llvm.x86.avx512.mask.store.q.")) {
595       UpgradeMaskedStore(Builder, C, CI->getArgOperand(0), CI->getArgOperand(1),
596                          CI->getArgOperand(2), /*Aligned*/true);
597 
598       // Remove intrinsic.
599       CI->eraseFromParent();
600       return;
601     } else if (Name.startswith("llvm.x86.avx512.mask.loadu.p") ||
602                Name.startswith("llvm.x86.avx512.mask.loadu.b.") ||
603                Name.startswith("llvm.x86.avx512.mask.loadu.w.") ||
604                Name.startswith("llvm.x86.avx512.mask.loadu.d.") ||
605                Name.startswith("llvm.x86.avx512.mask.loadu.q.")) {
606       Rep = UpgradeMaskedLoad(Builder, C, CI->getArgOperand(0),
607                               CI->getArgOperand(1), CI->getArgOperand(2),
608                               /*Aligned*/false);
609     } else if (Name.startswith("llvm.x86.avx512.mask.load.p") ||
610                Name.startswith("llvm.x86.avx512.mask.load.b.") ||
611                Name.startswith("llvm.x86.avx512.mask.load.w.") ||
612                Name.startswith("llvm.x86.avx512.mask.load.d.") ||
613                Name.startswith("llvm.x86.avx512.mask.load.q.")) {
614       Rep = UpgradeMaskedLoad(Builder, C, CI->getArgOperand(0),
                              CI->getArgOperand(1), CI->getArgOperand(2),
616                               /*Aligned*/true);
617     } else if (Name.startswith("llvm.x86.xop.vpcom")) {
618       Intrinsic::ID intID;
619       if (Name.endswith("ub"))
620         intID = Intrinsic::x86_xop_vpcomub;
621       else if (Name.endswith("uw"))
622         intID = Intrinsic::x86_xop_vpcomuw;
623       else if (Name.endswith("ud"))
624         intID = Intrinsic::x86_xop_vpcomud;
625       else if (Name.endswith("uq"))
626         intID = Intrinsic::x86_xop_vpcomuq;
627       else if (Name.endswith("b"))
628         intID = Intrinsic::x86_xop_vpcomb;
629       else if (Name.endswith("w"))
630         intID = Intrinsic::x86_xop_vpcomw;
631       else if (Name.endswith("d"))
632         intID = Intrinsic::x86_xop_vpcomd;
633       else if (Name.endswith("q"))
634         intID = Intrinsic::x86_xop_vpcomq;
635       else
636         llvm_unreachable("Unknown suffix");
637 
638       Name = Name.substr(18); // strip off "llvm.x86.xop.vpcom"
639       unsigned Imm;
640       if (Name.startswith("lt"))
641         Imm = 0;
642       else if (Name.startswith("le"))
643         Imm = 1;
644       else if (Name.startswith("gt"))
645         Imm = 2;
646       else if (Name.startswith("ge"))
647         Imm = 3;
648       else if (Name.startswith("eq"))
649         Imm = 4;
650       else if (Name.startswith("ne"))
651         Imm = 5;
652       else if (Name.startswith("false"))
653         Imm = 6;
654       else if (Name.startswith("true"))
655         Imm = 7;
656       else
657         llvm_unreachable("Unknown condition");
658 
659       Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID);
660       Rep =
661           Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1),
662                                      Builder.getInt8(Imm)});
663     } else if (Name == "llvm.x86.xop.vpcmov") {
664       Value *Arg0 = CI->getArgOperand(0);
665       Value *Arg1 = CI->getArgOperand(1);
666       Value *Sel = CI->getArgOperand(2);
667       unsigned NumElts = CI->getType()->getVectorNumElements();
668       Constant *MinusOne = ConstantVector::getSplat(NumElts, Builder.getInt64(-1));
669       Value *NotSel = Builder.CreateXor(Sel, MinusOne);
670       Value *Sel0 = Builder.CreateAnd(Arg0, Sel);
671       Value *Sel1 = Builder.CreateAnd(Arg1, NotSel);
672       Rep = Builder.CreateOr(Sel0, Sel1);
673     } else if (Name == "llvm.x86.sse42.crc32.64.8") {
674       Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
675                                                Intrinsic::x86_sse42_crc32_32_8);
676       Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
677       Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
678       Rep = Builder.CreateZExt(Rep, CI->getType(), "");
679     } else if (Name.startswith("llvm.x86.avx.vbroadcast")) {
680       // Replace broadcasts with a series of insertelements.
681       Type *VecTy = CI->getType();
682       Type *EltTy = VecTy->getVectorElementType();
683       unsigned EltNum = VecTy->getVectorNumElements();
684       Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
685                                           EltTy->getPointerTo());
686       Value *Load = Builder.CreateLoad(EltTy, Cast);
687       Type *I32Ty = Type::getInt32Ty(C);
688       Rep = UndefValue::get(VecTy);
689       for (unsigned I = 0; I < EltNum; ++I)
690         Rep = Builder.CreateInsertElement(Rep, Load,
691                                           ConstantInt::get(I32Ty, I));
692     } else if (Name.startswith("llvm.x86.sse41.pmovsx") ||
693                Name.startswith("llvm.x86.sse41.pmovzx") ||
694                Name.startswith("llvm.x86.avx2.pmovsx") ||
695                Name.startswith("llvm.x86.avx2.pmovzx")) {
696       VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
697       VectorType *DstTy = cast<VectorType>(CI->getType());
698       unsigned NumDstElts = DstTy->getNumElements();
699 
700       // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
      SmallVector<uint32_t, 8> ShuffleMask;
      for (unsigned i = 0; i != NumDstElts; ++i)
        ShuffleMask.push_back(i);
704 
705       Value *SV = Builder.CreateShuffleVector(
706           CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask);
707 
708       bool DoSext = (StringRef::npos != Name.find("pmovsx"));
709       Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
710                    : Builder.CreateZExt(SV, DstTy);
711     } else if (Name == "llvm.x86.avx2.vbroadcasti128") {
712       // Replace vbroadcasts with a vector shuffle.
713       Type *VT = VectorType::get(Type::getInt64Ty(C), 2);
714       Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
715                                             PointerType::getUnqual(VT));
716       Value *Load = Builder.CreateLoad(VT, Op);
      const uint32_t Idxs[4] = { 0, 1, 0, 1 };
718       Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
719                                         Idxs);
720     } else if (Name.startswith("llvm.x86.avx2.pbroadcast") ||
721                Name.startswith("llvm.x86.avx2.vbroadcast")) {
722       // Replace vp?broadcasts with a vector shuffle.
723       Value *Op = CI->getArgOperand(0);
724       unsigned NumElts = CI->getType()->getVectorNumElements();
725       Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
726       Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
727                                         Constant::getNullValue(MaskTy));
728     } else if (Name == "llvm.x86.sse2.psll.dq" ||
729                Name == "llvm.x86.avx2.psll.dq") {
730       // 128/256-bit shift left specified in bits.
731       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
732       Rep = UpgradeX86PSLLDQIntrinsics(Builder, C, CI->getArgOperand(0),
733                                        Shift / 8); // Shift is in bits.
734     } else if (Name == "llvm.x86.sse2.psrl.dq" ||
735                Name == "llvm.x86.avx2.psrl.dq") {
736       // 128/256-bit shift right specified in bits.
737       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
738       Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0),
739                                        Shift / 8); // Shift is in bits.
740     } else if (Name == "llvm.x86.sse2.psll.dq.bs" ||
741                Name == "llvm.x86.avx2.psll.dq.bs") {
742       // 128/256-bit shift left specified in bytes.
743       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
744       Rep = UpgradeX86PSLLDQIntrinsics(Builder, C, CI->getArgOperand(0), Shift);
745     } else if (Name == "llvm.x86.sse2.psrl.dq.bs" ||
746                Name == "llvm.x86.avx2.psrl.dq.bs") {
747       // 128/256-bit shift right specified in bytes.
748       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
749       Rep = UpgradeX86PSRLDQIntrinsics(Builder, C, CI->getArgOperand(0), Shift);
750     } else if (Name == "llvm.x86.sse41.pblendw" ||
751                Name.startswith("llvm.x86.sse41.blendp") ||
752                Name.startswith("llvm.x86.avx.blend.p") ||
753                Name == "llvm.x86.avx2.pblendw" ||
754                Name.startswith("llvm.x86.avx2.pblendd.")) {
755       Value *Op0 = CI->getArgOperand(0);
756       Value *Op1 = CI->getArgOperand(1);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
758       VectorType *VecTy = cast<VectorType>(CI->getType());
759       unsigned NumElts = VecTy->getNumElements();
760 
761       SmallVector<Constant*, 16> Idxs;
762       for (unsigned i = 0; i != NumElts; ++i) {
763         unsigned Idx = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
764         Idxs.push_back(Builder.getInt32(Idx));
765       }
766 
767       Rep = Builder.CreateShuffleVector(Op0, Op1, ConstantVector::get(Idxs));
768     } else if (Name.startswith("llvm.x86.avx.vinsertf128.") ||
769                Name == "llvm.x86.avx2.vinserti128") {
770       Value *Op0 = CI->getArgOperand(0);
771       Value *Op1 = CI->getArgOperand(1);
772       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
773       VectorType *VecTy = cast<VectorType>(CI->getType());
774       unsigned NumElts = VecTy->getNumElements();
775 
776       // Mask off the high bits of the immediate value; hardware ignores those.
777       Imm = Imm & 1;
778 
779       // Extend the second operand into a vector that is twice as big.
780       Value *UndefV = UndefValue::get(Op1->getType());
781       SmallVector<Constant*, 8> Idxs;
782       for (unsigned i = 0; i != NumElts; ++i) {
783         Idxs.push_back(Builder.getInt32(i));
784       }
785       Rep = Builder.CreateShuffleVector(Op1, UndefV, ConstantVector::get(Idxs));
786 
787       // Insert the second operand into the first operand.
788 
789       // Note that there is no guarantee that instruction lowering will actually
790       // produce a vinsertf128 instruction for the created shuffles. In
791       // particular, the 0 immediate case involves no lane changes, so it can
792       // be handled as a blend.
793 
794       // Example of shuffle mask for 32-bit elements:
795       // Imm = 1  <i32 0, i32 1, i32 2,  i32 3,  i32 8, i32 9, i32 10, i32 11>
796       // Imm = 0  <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6,  i32 7 >
797 
798       SmallVector<Constant*, 8> Idxs2;
799       // The low half of the result is either the low half of the 1st operand
800       // or the low half of the 2nd operand (the inserted vector).
801       for (unsigned i = 0; i != NumElts / 2; ++i) {
802         unsigned Idx = Imm ? i : (i + NumElts);
803         Idxs2.push_back(Builder.getInt32(Idx));
804       }
805       // The high half of the result is either the low half of the 2nd operand
806       // (the inserted vector) or the high half of the 1st operand.
807       for (unsigned i = NumElts / 2; i != NumElts; ++i) {
808         unsigned Idx = Imm ? (i + NumElts / 2) : i;
809         Idxs2.push_back(Builder.getInt32(Idx));
810       }
811       Rep = Builder.CreateShuffleVector(Op0, Rep, ConstantVector::get(Idxs2));
812     } else if (Name.startswith("llvm.x86.avx.vextractf128.") ||
813                Name == "llvm.x86.avx2.vextracti128") {
814       Value *Op0 = CI->getArgOperand(0);
815       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
816       VectorType *VecTy = cast<VectorType>(CI->getType());
817       unsigned NumElts = VecTy->getNumElements();
818 
819       // Mask off the high bits of the immediate value; hardware ignores those.
820       Imm = Imm & 1;
821 
822       // Get indexes for either the high half or low half of the input vector.
823       SmallVector<Constant*, 4> Idxs(NumElts);
824       for (unsigned i = 0; i != NumElts; ++i) {
825         unsigned Idx = Imm ? (i + NumElts) : i;
826         Idxs[i] = Builder.getInt32(Idx);
827       }
828 
829       Value *UndefV = UndefValue::get(Op0->getType());
830       Rep = Builder.CreateShuffleVector(Op0, UndefV, ConstantVector::get(Idxs));
831     } else if (Name == "llvm.stackprotectorcheck") {
832       Rep = nullptr;
833     } else {
834       bool PD128 = false, PD256 = false, PS128 = false, PS256 = false;
835       if (Name == "llvm.x86.avx.vpermil.pd.256")
836         PD256 = true;
837       else if (Name == "llvm.x86.avx.vpermil.pd")
838         PD128 = true;
839       else if (Name == "llvm.x86.avx.vpermil.ps.256")
840         PS256 = true;
841       else if (Name == "llvm.x86.avx.vpermil.ps")
842         PS128 = true;
843 
844       if (PD256 || PD128 || PS256 || PS128) {
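        // vpermilpd/vpermilps with an immediate is an in-lane shuffle; build
        // an equivalent shufflevector mask from the immediate bits.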
845         Value *Op0 = CI->getArgOperand(0);
846         unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
847         SmallVector<Constant*, 8> Idxs;
848 
849         if (PD128)
850           for (unsigned i = 0; i != 2; ++i)
851             Idxs.push_back(Builder.getInt32((Imm >> i) & 0x1));
852         else if (PD256)
853           for (unsigned l = 0; l != 4; l+=2)
854             for (unsigned i = 0; i != 2; ++i)
855               Idxs.push_back(Builder.getInt32(((Imm >> (l+i)) & 0x1) + l));
856         else if (PS128)
857           for (unsigned i = 0; i != 4; ++i)
858             Idxs.push_back(Builder.getInt32((Imm >> (2 * i)) & 0x3));
859         else if (PS256)
860           for (unsigned l = 0; l != 8; l+=4)
861             for (unsigned i = 0; i != 4; ++i)
862               Idxs.push_back(Builder.getInt32(((Imm >> (2 * i)) & 0x3) + l));
863         else
864           llvm_unreachable("Unexpected function");
865 
866         Rep = Builder.CreateShuffleVector(Op0, Op0, ConstantVector::get(Idxs));
867       } else {
868         llvm_unreachable("Unknown function for CallInst upgrade.");
869       }
870     }
871 
872     if (Rep)
873       CI->replaceAllUsesWith(Rep);
874     CI->eraseFromParent();
875     return;
876   }
877 
878   std::string Name = CI->getName();
879   if (!Name.empty())
880     CI->setName(Name + ".old");
881 
882   switch (NewFn->getIntrinsicID()) {
883   default:
884     llvm_unreachable("Unknown function for CallInst upgrade.");
885 
886   case Intrinsic::arm_neon_vld1:
887   case Intrinsic::arm_neon_vld2:
888   case Intrinsic::arm_neon_vld3:
889   case Intrinsic::arm_neon_vld4:
890   case Intrinsic::arm_neon_vld2lane:
891   case Intrinsic::arm_neon_vld3lane:
892   case Intrinsic::arm_neon_vld4lane:
893   case Intrinsic::arm_neon_vst1:
894   case Intrinsic::arm_neon_vst2:
895   case Intrinsic::arm_neon_vst3:
896   case Intrinsic::arm_neon_vst4:
897   case Intrinsic::arm_neon_vst2lane:
898   case Intrinsic::arm_neon_vst3lane:
899   case Intrinsic::arm_neon_vst4lane: {
900     SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
901                                  CI->arg_operands().end());
902     CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
903     CI->eraseFromParent();
904     return;
905   }
906 
907   case Intrinsic::ctlz:
908   case Intrinsic::cttz:
909     assert(CI->getNumArgOperands() == 1 &&
910            "Mismatch between function args and call args");
911     CI->replaceAllUsesWith(Builder.CreateCall(
912         NewFn, {CI->getArgOperand(0), Builder.getFalse()}, Name));
913     CI->eraseFromParent();
914     return;
915 
916   case Intrinsic::objectsize:
917     CI->replaceAllUsesWith(Builder.CreateCall(
918         NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)}, Name));
919     CI->eraseFromParent();
920     return;
921 
922   case Intrinsic::ctpop: {
923     CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {CI->getArgOperand(0)}));
924     CI->eraseFromParent();
925     return;
926   }
927 
928   case Intrinsic::x86_xop_vfrcz_ss:
929   case Intrinsic::x86_xop_vfrcz_sd:
930     CI->replaceAllUsesWith(
931         Builder.CreateCall(NewFn, {CI->getArgOperand(1)}, Name));
932     CI->eraseFromParent();
933     return;
934 
935   case Intrinsic::x86_xop_vpermil2pd:
936   case Intrinsic::x86_xop_vpermil2ps:
937   case Intrinsic::x86_xop_vpermil2pd_256:
938   case Intrinsic::x86_xop_vpermil2ps_256: {
939     SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
940                                  CI->arg_operands().end());
941     VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
942     VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
943     Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
944     CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args, Name));
945     CI->eraseFromParent();
946     return;
947   }
948 
949   case Intrinsic::x86_sse41_ptestc:
950   case Intrinsic::x86_sse41_ptestz:
951   case Intrinsic::x86_sse41_ptestnzc: {
952     // The arguments for these intrinsics used to be v4f32, and changed
953     // to v2i64. This is purely a nop, since those are bitwise intrinsics.
954     // So, the only thing required is a bitcast for both arguments.
955     // First, check the arguments have the old type.
956     Value *Arg0 = CI->getArgOperand(0);
957     if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
958       return;
959 
960     // Old intrinsic, add bitcasts
961     Value *Arg1 = CI->getArgOperand(1);
962 
963     Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
964 
965     Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
966     Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
967 
968     CallInst *NewCall = Builder.CreateCall(NewFn, {BC0, BC1}, Name);
969     CI->replaceAllUsesWith(NewCall);
970     CI->eraseFromParent();
971     return;
972   }
973 
974   case Intrinsic::x86_sse41_insertps:
975   case Intrinsic::x86_sse41_dppd:
976   case Intrinsic::x86_sse41_dpps:
977   case Intrinsic::x86_sse41_mpsadbw:
978   case Intrinsic::x86_avx_dp_ps_256:
979   case Intrinsic::x86_avx2_mpsadbw: {
980     // Need to truncate the last argument from i32 to i8 -- this argument models
981     // an inherently 8-bit immediate operand to these x86 instructions.
982     SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
983                                  CI->arg_operands().end());
984 
985     // Replace the last argument with a trunc.
986     Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
987 
988     CallInst *NewCall = Builder.CreateCall(NewFn, Args);
989     CI->replaceAllUsesWith(NewCall);
990     CI->eraseFromParent();
991     return;
992   }
993 
994   case Intrinsic::thread_pointer: {
995     CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {}));
996     CI->eraseFromParent();
997     return;
998   }
999   }
1000 }
1001 
1002 void llvm::UpgradeCallsToIntrinsic(Function *F) {
1003   assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
1004 
1005   // Check if this function should be upgraded and get the replacement function
1006   // if there is one.
1007   Function *NewFn;
1008   if (UpgradeIntrinsicFunction(F, NewFn)) {
1009     // Replace all users of the old function with the new function or new
1010     // instructions. This is not a range loop because the call is deleted.
1011     for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; )
1012       if (CallInst *CI = dyn_cast<CallInst>(*UI++))
1013         UpgradeIntrinsicCall(CI, NewFn);
1014 
1015     // Remove old function, no longer used, from the module.
1016     F->eraseFromParent();
1017   }
1018 }
1019 
1020 void llvm::UpgradeInstWithTBAATag(Instruction *I) {
1021   MDNode *MD = I->getMetadata(LLVMContext::MD_tbaa);
1022   assert(MD && "UpgradeInstWithTBAATag should have a TBAA tag");
1023   // Check if the tag uses struct-path aware TBAA format.
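  // (In the old scalar format the first operand was an MDString, while the
  // struct-path form starts with another MDNode.)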
1024   if (isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3)
1025     return;
1026 
1027   if (MD->getNumOperands() == 3) {
1028     Metadata *Elts[] = {MD->getOperand(0), MD->getOperand(1)};
1029     MDNode *ScalarType = MDNode::get(I->getContext(), Elts);
    // Create an MDNode <ScalarType, ScalarType, offset 0, const>
1031     Metadata *Elts2[] = {ScalarType, ScalarType,
1032                          ConstantAsMetadata::get(Constant::getNullValue(
1033                              Type::getInt64Ty(I->getContext()))),
1034                          MD->getOperand(2)};
1035     I->setMetadata(LLVMContext::MD_tbaa, MDNode::get(I->getContext(), Elts2));
1036   } else {
    // Create an MDNode <MD, MD, offset 0>
1038     Metadata *Elts[] = {MD, MD, ConstantAsMetadata::get(Constant::getNullValue(
1039                                     Type::getInt64Ty(I->getContext())))};
1040     I->setMetadata(LLVMContext::MD_tbaa, MDNode::get(I->getContext(), Elts));
1041   }
1042 }
1043 
1044 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
1045                                       Instruction *&Temp) {
1046   if (Opc != Instruction::BitCast)
1047     return nullptr;
1048 
1049   Temp = nullptr;
1050   Type *SrcTy = V->getType();
1051   if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
1052       SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
1053     LLVMContext &Context = V->getContext();
1054 
    // We have no information about the target data layout, so we assume that
    // the maximum pointer size is 64 bits.
1057     Type *MidTy = Type::getInt64Ty(Context);
1058     Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
1059 
1060     return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
1061   }
1062 
1063   return nullptr;
1064 }
1065 
1066 Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
1067   if (Opc != Instruction::BitCast)
1068     return nullptr;
1069 
1070   Type *SrcTy = C->getType();
1071   if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
1072       SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
1073     LLVMContext &Context = C->getContext();
1074 
    // We have no information about the target data layout, so we assume that
    // the maximum pointer size is 64 bits.
1077     Type *MidTy = Type::getInt64Ty(Context);
1078 
1079     return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
1080                                      DestTy);
1081   }
1082 
1083   return nullptr;
1084 }
1085 
/// Check the debug info version number; if it is outdated, drop the debug
/// info. Return true if the module was modified.
1088 bool llvm::UpgradeDebugInfo(Module &M) {
1089   unsigned Version = getDebugMetadataVersionFromModule(M);
1090   if (Version == DEBUG_METADATA_VERSION)
1091     return false;
1092 
1093   bool RetCode = StripDebugInfo(M);
1094   if (RetCode) {
1095     DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
1096     M.getContext().diagnose(DiagVersion);
1097   }
1098   return RetCode;
1099 }
1100 
1101 bool llvm::UpgradeModuleFlags(Module &M) {
1102   const NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
1103   if (!ModFlags)
1104     return false;
1105 
1106   bool HasObjCFlag = false, HasClassProperties = false;
1107   for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
1108     MDNode *Op = ModFlags->getOperand(I);
1109     if (Op->getNumOperands() < 2)
1110       continue;
1111     MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1112     if (!ID)
1113       continue;
1114     if (ID->getString() == "Objective-C Image Info Version")
1115       HasObjCFlag = true;
1116     if (ID->getString() == "Objective-C Class Properties")
1117       HasClassProperties = true;
1118   }
1119   // "Objective-C Class Properties" is recently added for Objective-C. We
1120   // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
1121   // flag of value 0, so we can correclty report error when trying to link
1122   // an ObjC bitcode without this module flag with an ObjC bitcode with this
1123   // module flag.
1124   if (HasObjCFlag && !HasClassProperties) {
1125     M.addModuleFlag(llvm::Module::Error, "Objective-C Class Properties",
1126                     (uint32_t)0);
1127     return true;
1128   }
1129   return false;
1130 }
1131 
1132 static bool isOldLoopArgument(Metadata *MD) {
1133   auto *T = dyn_cast_or_null<MDTuple>(MD);
1134   if (!T)
1135     return false;
1136   if (T->getNumOperands() < 1)
1137     return false;
1138   auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
1139   if (!S)
1140     return false;
1141   return S->getString().startswith("llvm.vectorizer.");
1142 }
1143 
1144 static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
1145   StringRef OldPrefix = "llvm.vectorizer.";
1146   assert(OldTag.startswith(OldPrefix) && "Expected old prefix");
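  // For example, "llvm.vectorizer.width" becomes "llvm.loop.vectorize.width";
  // the old unroll hint maps to the interleave count instead.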
1147 
1148   if (OldTag == "llvm.vectorizer.unroll")
1149     return MDString::get(C, "llvm.loop.interleave.count");
1150 
1151   return MDString::get(
1152       C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
1153              .str());
1154 }
1155 
1156 static Metadata *upgradeLoopArgument(Metadata *MD) {
1157   auto *T = dyn_cast_or_null<MDTuple>(MD);
1158   if (!T)
1159     return MD;
1160   if (T->getNumOperands() < 1)
1161     return MD;
1162   auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
1163   if (!OldTag)
1164     return MD;
1165   if (!OldTag->getString().startswith("llvm.vectorizer."))
1166     return MD;
1167 
1168   // This has an old tag.  Upgrade it.
1169   SmallVector<Metadata *, 8> Ops;
1170   Ops.reserve(T->getNumOperands());
1171   Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
1172   for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
1173     Ops.push_back(T->getOperand(I));
1174 
1175   return MDTuple::get(T->getContext(), Ops);
1176 }
1177 
1178 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
1179   auto *T = dyn_cast<MDTuple>(&N);
1180   if (!T)
1181     return &N;
1182 
1183   if (!llvm::any_of(T->operands(), isOldLoopArgument))
1184     return &N;
1185 
1186   SmallVector<Metadata *, 8> Ops;
1187   Ops.reserve(T->getNumOperands());
1188   for (Metadata *MD : T->operands())
1189     Ops.push_back(upgradeLoopArgument(MD));
1190 
1191   return MDTuple::get(T->getContext(), Ops);
1192 }
1193