//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>
using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 functions whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradeSSE41Function(Function* F, Intrinsic::ID IID,
                                 Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's the old version; replace it with the new one.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}
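
// For illustration, assuming a module that still carries the old signature:
// a declaration such as
//   declare i32 @llvm.x86.sse41.ptestc(<4 x float>, <4 x float>)
// is renamed to @llvm.x86.sse41.ptestc.old here, and NewFn becomes the
// current <2 x i64> declaration; the call sites are then rewritten to match
// by UpgradeIntrinsicCall.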

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
     F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move the old function aside and map down to the new intrinsic.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}
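
// Sketch of the effect, using llvm.x86.sse41.insertps as an example: the old
//   declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32)
// is renamed to ".old" and redeclared with an i8 immediate; the old i32 mask
// is truncated to i8 at each call site when the calls are rewritten.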

static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.neon.vclz")) {
      Type* args[2] = {
        F->arg_begin()->getType(),
        Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change the name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(),
                               "llvm." + Name + ".p0i8", F->getParent());
      return true;
    }
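    // E.g. the block above recreates a declaration named
    // llvm.arm.neon.vld2.v8i8 as llvm.arm.neon.vld2.v8i8.p0i8, encoding the
    // pointer parameter type in the mangled name as the current definition
    // requires.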
    Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vstRegex.match(Name)) {
      static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
                                                Intrinsic::arm_neon_vst2,
                                                Intrinsic::arm_neon_vst3,
                                                Intrinsic::arm_neon_vst4};

      static const Intrinsic::ID StoreLaneInts[] = {
        Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
        Intrinsic::arm_neon_vst4lane
      };

      auto fArgs = F->getFunctionType()->params();
      Type *Tys[] = {fArgs[0], fArgs[1]};
      if (Name.find("lane") == StringRef::npos)
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreInts[fArgs.size() - 3], Tys);
      else
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreLaneInts[fArgs.size() - 5], Tys);
      return true;
    }
    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
      return true;
    }
    break;
  }

  case 'c': {
    if (Name.startswith("ctlz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("cttz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
                                        F->arg_begin()->getType());
      return true;
    }
    break;
  }
  case 'i': {
    if (Name.startswith("invariant.start")) {
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[1]};
      if (F->getName() !=
          Intrinsic::getName(Intrinsic::invariant_start, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(
            F->getParent(), Intrinsic::invariant_start, ObjectPtr);
        return true;
      }
    }
    if (Name.startswith("invariant.end")) {
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[2]};
      if (F->getName() !=
          Intrinsic::getName(Intrinsic::invariant_end, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::invariant_end, ObjectPtr);
        return true;
      }
    }
    break;
  }
  case 'm': {
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    break;
  }

  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (F->arg_size() == 2 && Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::objectsize, Tys);
        return true;
      }
    }
    break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x': {
    bool IsX86 = Name.startswith("x86.");
    if (IsX86)
      Name = Name.substr(4);

    if (IsX86 &&
        (Name.startswith("sse2.pcmpeq.") ||
         Name.startswith("sse2.pcmpgt.") ||
         Name.startswith("avx2.pcmpeq.") ||
         Name.startswith("avx2.pcmpgt.") ||
         Name.startswith("avx512.mask.pcmpeq.") ||
         Name.startswith("avx512.mask.pcmpgt.") ||
         Name == "sse41.pmaxsb" ||
         Name == "sse2.pmaxs.w" ||
         Name == "sse41.pmaxsd" ||
         Name == "sse2.pmaxu.b" ||
         Name == "sse41.pmaxuw" ||
         Name == "sse41.pmaxud" ||
         Name == "sse41.pminsb" ||
         Name == "sse2.pmins.w" ||
         Name == "sse41.pminsd" ||
         Name == "sse2.pminu.b" ||
         Name == "sse41.pminuw" ||
         Name == "sse41.pminud" ||
         Name.startswith("avx2.pmax") ||
         Name.startswith("avx2.pmin") ||
         Name.startswith("avx2.vbroadcast") ||
         Name.startswith("avx2.pbroadcast") ||
         Name.startswith("avx.vpermil.") ||
         Name.startswith("sse2.pshuf") ||
         Name.startswith("avx512.pbroadcast") ||
         Name.startswith("avx512.mask.broadcast.s") ||
         Name.startswith("avx512.mask.movddup") ||
         Name.startswith("avx512.mask.movshdup") ||
         Name.startswith("avx512.mask.movsldup") ||
         Name.startswith("avx512.mask.pshuf.d.") ||
         Name.startswith("avx512.mask.pshufl.w.") ||
         Name.startswith("avx512.mask.pshufh.w.") ||
         Name.startswith("avx512.mask.shuf.p") ||
         Name.startswith("avx512.mask.vpermil.p") ||
         Name.startswith("avx512.mask.perm.df.") ||
         Name.startswith("avx512.mask.perm.di.") ||
         Name.startswith("avx512.mask.punpckl") ||
         Name.startswith("avx512.mask.punpckh") ||
         Name.startswith("avx512.mask.unpckl.") ||
         Name.startswith("avx512.mask.unpckh.") ||
         Name.startswith("avx512.mask.pand.") ||
         Name.startswith("avx512.mask.pandn.") ||
         Name.startswith("avx512.mask.por.") ||
         Name.startswith("avx512.mask.pxor.") ||
         Name.startswith("avx512.mask.and.") ||
         Name.startswith("avx512.mask.andn.") ||
         Name.startswith("avx512.mask.or.") ||
         Name.startswith("avx512.mask.xor.") ||
         Name.startswith("avx512.mask.padd.") ||
         Name.startswith("avx512.mask.psub.") ||
         Name.startswith("avx512.mask.pmull.") ||
         Name.startswith("avx512.mask.add.pd.128") ||
         Name.startswith("avx512.mask.add.pd.256") ||
         Name.startswith("avx512.mask.add.ps.128") ||
         Name.startswith("avx512.mask.add.ps.256") ||
         Name.startswith("avx512.mask.div.pd.128") ||
         Name.startswith("avx512.mask.div.pd.256") ||
         Name.startswith("avx512.mask.div.ps.128") ||
         Name.startswith("avx512.mask.div.ps.256") ||
         Name.startswith("avx512.mask.mul.pd.128") ||
         Name.startswith("avx512.mask.mul.pd.256") ||
         Name.startswith("avx512.mask.mul.ps.128") ||
         Name.startswith("avx512.mask.mul.ps.256") ||
         Name.startswith("avx512.mask.sub.pd.128") ||
         Name.startswith("avx512.mask.sub.pd.256") ||
         Name.startswith("avx512.mask.sub.ps.128") ||
         Name.startswith("avx512.mask.sub.ps.256") ||
         Name.startswith("sse41.pmovsx") ||
         Name.startswith("sse41.pmovzx") ||
         Name.startswith("avx2.pmovsx") ||
         Name.startswith("avx2.pmovzx") ||
         Name == "sse2.cvtdq2pd" ||
         Name == "sse2.cvtps2pd" ||
         Name == "avx.cvtdq2.pd.256" ||
         Name == "avx.cvt.ps2.pd.256" ||
         Name.startswith("avx.vinsertf128.") ||
         Name == "avx2.vinserti128" ||
         Name.startswith("avx.vextractf128.") ||
         Name == "avx2.vextracti128" ||
         Name.startswith("sse4a.movnt.") ||
         Name.startswith("avx.movnt.") ||
         Name.startswith("avx512.storent.") ||
         Name == "sse2.storel.dq" ||
         Name.startswith("sse.storeu.") ||
         Name.startswith("sse2.storeu.") ||
         Name.startswith("avx.storeu.") ||
         Name.startswith("avx512.mask.storeu.") ||
         Name.startswith("avx512.mask.store.p") ||
         Name.startswith("avx512.mask.store.b.") ||
         Name.startswith("avx512.mask.store.w.") ||
         Name.startswith("avx512.mask.store.d.") ||
         Name.startswith("avx512.mask.store.q.") ||
         Name.startswith("avx512.mask.loadu.") ||
         Name.startswith("avx512.mask.load.") ||
         Name == "sse42.crc32.64.8" ||
         Name.startswith("avx.vbroadcast.s") ||
         Name.startswith("avx512.mask.palignr.") ||
         Name.startswith("sse2.psll.dq") ||
         Name.startswith("sse2.psrl.dq") ||
         Name.startswith("avx2.psll.dq") ||
         Name.startswith("avx2.psrl.dq") ||
         Name.startswith("avx512.psll.dq") ||
         Name.startswith("avx512.psrl.dq") ||
         Name == "sse41.pblendw" ||
         Name.startswith("sse41.blendp") ||
         Name.startswith("avx.blend.p") ||
         Name == "avx2.pblendw" ||
         Name.startswith("avx2.pblendd.") ||
         Name.startswith("avx.vbroadcastf128") ||
         Name == "avx2.vbroadcasti128" ||
         Name == "xop.vpcmov" ||
         (Name.startswith("xop.vpcom") && F->arg_size() == 2))) {
      NewFn = nullptr;
      return true;
    }
    // SSE4.1 ptest functions may have an old signature.
    if (IsX86 && Name.startswith("sse41.ptest")) {
      if (Name.substr(11) == "c")
        return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestc, NewFn);
      if (Name.substr(11) == "z")
        return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestz, NewFn);
      if (Name.substr(11) == "nzc")
        return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
    }
    // Several blend and other instructions with masks used the wrong number of
    // bits.
    if (IsX86 && Name == "sse41.insertps")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
                                              NewFn);
    if (IsX86 && Name == "sse41.dppd")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
                                              NewFn);
    if (IsX86 && Name == "sse41.dpps")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
                                              NewFn);
    if (IsX86 && Name == "sse41.mpsadbw")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
                                              NewFn);
    if (IsX86 && Name == "avx.dp.ps.256")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
                                              NewFn);
    if (IsX86 && Name == "avx2.mpsadbw")
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
                                              NewFn);

    // frcz.ss/sd may need to have an argument dropped.
    if (IsX86 && Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::x86_xop_vfrcz_ss);
      return true;
    }
    if (IsX86 && Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::x86_xop_vfrcz_sd);
      return true;
    }
    if (IsX86 && (Name.startswith("avx512.mask.pslli.") ||
                  Name.startswith("avx512.mask.psrai.") ||
                  Name.startswith("avx512.mask.psrli."))) {
      Intrinsic::ID ShiftID;
      if (Name.slice(12, 16) == "psll")
        ShiftID = Name[18] == 'd' ? Intrinsic::x86_avx512_mask_psll_di_512
                                  : Intrinsic::x86_avx512_mask_psll_qi_512;
      else if (Name.slice(12, 16) == "psra")
        ShiftID = Name[18] == 'd' ? Intrinsic::x86_avx512_mask_psra_di_512
                                  : Intrinsic::x86_avx512_mask_psra_qi_512;
      else
        ShiftID = Name[18] == 'd' ? Intrinsic::x86_avx512_mask_psrl_di_512
                                  : Intrinsic::x86_avx512_mask_psrl_qi_512;
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), ShiftID);
      return true;
    }
    // Fix the FMA4 intrinsics to remove the 4.
    if (IsX86 && Name.startswith("fma4.")) {
      rename(F);
      NewFn = F;
      return true;
    }
    // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
    if (IsX86 && Name.startswith("xop.vpermil2")) {
      auto Params = F->getFunctionType()->params();
      auto Idx = Params[2];
      if (Idx->getScalarType()->isFloatingPointTy()) {
        rename(F);
        unsigned IdxSize = Idx->getPrimitiveSizeInBits();
        unsigned EltSize = Idx->getScalarSizeInBits();
        Intrinsic::ID Permil2ID;
        if (EltSize == 64 && IdxSize == 128)
          Permil2ID = Intrinsic::x86_xop_vpermil2pd;
        else if (EltSize == 32 && IdxSize == 128)
          Permil2ID = Intrinsic::x86_xop_vpermil2ps;
        else if (EltSize == 64 && IdxSize == 256)
          Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
        else
          Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
        NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
        return true;
      }
    }
    break;
  }
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  // Nothing to do yet.
  return false;
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}
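
// Worked example for the routine above: psll.dq by Shift == 4 on a 128-bit
// vector (NumElts == 16) produces shuffle indices <12, 13, ..., 27>, i.e.
// four zero bytes followed by bytes 0..11 of Op, which is a whole-register
// left shift by four bytes.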

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
                             cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have fewer than 8 elements, the original mask was an i8 and we
  // need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}
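
// Example of the helper above: for an i8 mask and NumElts == 4, the mask is
// bitcast to <8 x i1> and then shuffled down to <4 x i1> with indices
// <0, 1, 2, 3>; for NumElts >= 8 the bitcast alone suffices.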

static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just return the first (unmasked) operand.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *UpgradeX86PALIGNRIntrinsics(IRBuilder<> &Builder,
                                          Value *Op0, Value *Op1, Value *Shift,
                                          Value *Passthru, Value *Mask) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert(NumElts % 16 == 0);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that.
  for (unsigned l = 0; l != NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (Idx >= 16)
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}
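
// Worked example: ShiftVal == 4 on two 16-byte operands yields indices
// <4, 5, ..., 19>, selecting bytes 4..15 of Op1 followed by bytes 0..3 of
// Op0, which matches the concat-and-shift semantics of palignr.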

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  unsigned Align =
    Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
}
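
// For example, an avx512.mask.storeu call with a <16 x i32> value and an i16
// mask becomes a call to the llvm.masked.store intrinsic with the mask
// bitcast to <16 x i1> (alignment 1 for the unaligned form); an all-ones
// constant mask degenerates to a plain store. The masked loads below follow
// the same pattern.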

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                             llvm::PointerType::getUnqual(Passthru->getType()));
  unsigned Align =
    Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  return Builder.CreateSelect(Cmp, Op0, Op1);
}
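
// E.g. a call to llvm.x86.sse41.pmaxsd is replaced with IR along the lines
// of:
//   %c = icmp sgt <4 x i32> %a, %b
//   %r = select <4 x i1> %c, <4 x i32> %a, <4 x i32> %b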

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();
  Value *Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));

  Value *Mask = CI.getArgOperand(2);
  const auto *C = dyn_cast<Constant>(Mask);
  if (!C || !C->isAllOnesValue())
    Cmp = Builder.CreateAnd(Cmp, getX86MaskVec(Builder, Mask, NumElts));

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Cmp = Builder.CreateShuffleVector(Cmp,
                                      Constant::getNullValue(Cmp->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Cmp, IntegerType::get(CI.getContext(),
                                                     std::max(NumElts, 8U)));
}
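
// Example: avx512.mask.pcmpgt.d.128 compares <4 x i32> operands, ands the
// <4 x i1> result with the caller's mask, widens to <8 x i1> by shuffling in
// elements of the null vector (indices 4..7 above), and bitcasts the result
// to the i8 the old intrinsic returned.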

/// Upgrade a call to an old intrinsic. All argument and return casting must be
/// provided to seamlessly integrate with existing context.
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
  Function *F = CI->getCalledFunction();
  LLVMContext &C = CI->getContext();
  IRBuilder<> Builder(C);
  Builder.SetInsertPoint(CI->getParent(), CI->getIterator());

  assert(F && "Intrinsic call is not direct?");

  if (!NewFn) {
    // Get the Function's name.
    StringRef Name = F->getName();

    assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
    Name = Name.substr(5);

    bool IsX86 = Name.startswith("x86.");
    if (IsX86)
      Name = Name.substr(4);

    Value *Rep;
    // Upgrade packed integer vector compare intrinsics to compare instructions.
    if (IsX86 && (Name.startswith("sse2.pcmpeq.") ||
                  Name.startswith("avx2.pcmpeq."))) {
      Rep = Builder.CreateICmpEQ(CI->getArgOperand(0), CI->getArgOperand(1),
                                 "pcmpeq");
      Rep = Builder.CreateSExt(Rep, CI->getType(), "");
    } else if (IsX86 && (Name.startswith("sse2.pcmpgt.") ||
                         Name.startswith("avx2.pcmpgt."))) {
      Rep = Builder.CreateICmpSGT(CI->getArgOperand(0), CI->getArgOperand(1),
                                  "pcmpgt");
      Rep = Builder.CreateSExt(Rep, CI->getType(), "");
    } else if (IsX86 && Name.startswith("avx512.mask.pcmpeq.")) {
      Rep = upgradeMaskedCompare(Builder, *CI, ICmpInst::ICMP_EQ);
    } else if (IsX86 && Name.startswith("avx512.mask.pcmpgt.")) {
      Rep = upgradeMaskedCompare(Builder, *CI, ICmpInst::ICMP_SGT);
    } else if (IsX86 && (Name == "sse41.pmaxsb" ||
                         Name == "sse2.pmaxs.w" ||
                         Name == "sse41.pmaxsd" ||
                         Name.startswith("avx2.pmaxs"))) {
      Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT);
    } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
                         Name == "sse41.pmaxuw" ||
                         Name == "sse41.pmaxud" ||
                         Name.startswith("avx2.pmaxu"))) {
      Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT);
    } else if (IsX86 && (Name == "sse41.pminsb" ||
                         Name == "sse2.pmins.w" ||
                         Name == "sse41.pminsd" ||
                         Name.startswith("avx2.pmins"))) {
      Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT);
    } else if (IsX86 && (Name == "sse2.pminu.b" ||
                         Name == "sse41.pminuw" ||
                         Name == "sse41.pminud" ||
                         Name.startswith("avx2.pminu"))) {
      Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT);
    } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
                         Name == "sse2.cvtps2pd" ||
                         Name == "avx.cvtdq2.pd.256" ||
                         Name == "avx.cvt.ps2.pd.256")) {
      // Lossless i32/float to double conversion.
      // Extract the bottom elements if necessary and convert to double vector.
      Value *Src = CI->getArgOperand(0);
      VectorType *SrcTy = cast<VectorType>(Src->getType());
      VectorType *DstTy = cast<VectorType>(CI->getType());
      Rep = CI->getArgOperand(0);

      unsigned NumDstElts = DstTy->getNumElements();
      if (NumDstElts < SrcTy->getNumElements()) {
        assert(NumDstElts == 2 && "Unexpected vector size");
        uint32_t ShuffleMask[2] = { 0, 1 };
        Rep = Builder.CreateShuffleVector(Rep, UndefValue::get(SrcTy),
                                          ShuffleMask);
      }

      bool Int2Double = (StringRef::npos != Name.find("cvtdq2"));
      if (Int2Double)
        Rep = Builder.CreateSIToFP(Rep, DstTy, "cvtdq2pd");
      else
        Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
    } else if (IsX86 && Name.startswith("sse4a.movnt.")) {
      Module *M = F->getParent();
      SmallVector<Metadata *, 1> Elts;
      Elts.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
      MDNode *Node = MDNode::get(C, Elts);

      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);

      // Nontemporal (unaligned) store of the 0th element of the float/double
      // vector.
      Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
      PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
      Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
      Value *Extract =
          Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");

      StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1);
      SI->setMetadata(M->getMDKindID("nontemporal"), Node);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    } else if (IsX86 && (Name.startswith("avx.movnt.") ||
                         Name.startswith("avx512.storent."))) {
      Module *M = F->getParent();
      SmallVector<Metadata *, 1> Elts;
      Elts.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
      MDNode *Node = MDNode::get(C, Elts);

      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);

      // Convert the type of the pointer to a pointer to the stored type.
      Value *BC = Builder.CreateBitCast(Arg0,
                                        PointerType::getUnqual(Arg1->getType()),
                                        "cast");
      VectorType *VTy = cast<VectorType>(Arg1->getType());
      StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC,
                                                 VTy->getBitWidth() / 8);
      SI->setMetadata(M->getMDKindID("nontemporal"), Node);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    } else if (IsX86 && Name == "sse2.storel.dq") {
      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);

      Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
      Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
      Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
      Value *BC = Builder.CreateBitCast(Arg0,
                                        PointerType::getUnqual(Elt->getType()),
                                        "cast");
      Builder.CreateAlignedStore(Elt, BC, 1);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    } else if (IsX86 && (Name.startswith("sse.storeu.") ||
                         Name.startswith("sse2.storeu.") ||
                         Name.startswith("avx.storeu."))) {
      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);

      Arg0 = Builder.CreateBitCast(Arg0,
                                   PointerType::getUnqual(Arg1->getType()),
                                   "cast");
      Builder.CreateAlignedStore(Arg1, Arg0, 1);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    } else if (IsX86 && (Name.startswith("avx512.mask.storeu."))) {
      UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
                         CI->getArgOperand(2), /*Aligned*/false);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    } else if (IsX86 && (Name.startswith("avx512.mask.store."))) {
      UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
                         CI->getArgOperand(2), /*Aligned*/true);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) {
      Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
                              CI->getArgOperand(1), CI->getArgOperand(2),
                              /*Aligned*/false);
    } else if (IsX86 && (Name.startswith("avx512.mask.load."))) {
      Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
                              CI->getArgOperand(1), CI->getArgOperand(2),
                              /*Aligned*/true);
    } else if (IsX86 && Name.startswith("xop.vpcom")) {
      Intrinsic::ID intID;
      if (Name.endswith("ub"))
        intID = Intrinsic::x86_xop_vpcomub;
      else if (Name.endswith("uw"))
        intID = Intrinsic::x86_xop_vpcomuw;
      else if (Name.endswith("ud"))
        intID = Intrinsic::x86_xop_vpcomud;
      else if (Name.endswith("uq"))
        intID = Intrinsic::x86_xop_vpcomuq;
      else if (Name.endswith("b"))
        intID = Intrinsic::x86_xop_vpcomb;
      else if (Name.endswith("w"))
        intID = Intrinsic::x86_xop_vpcomw;
      else if (Name.endswith("d"))
        intID = Intrinsic::x86_xop_vpcomd;
      else if (Name.endswith("q"))
        intID = Intrinsic::x86_xop_vpcomq;
      else
        llvm_unreachable("Unknown suffix");

      Name = Name.substr(9); // strip off "xop.vpcom"
      unsigned Imm;
      if (Name.startswith("lt"))
        Imm = 0;
      else if (Name.startswith("le"))
        Imm = 1;
      else if (Name.startswith("gt"))
        Imm = 2;
      else if (Name.startswith("ge"))
        Imm = 3;
      else if (Name.startswith("eq"))
        Imm = 4;
      else if (Name.startswith("ne"))
        Imm = 5;
      else if (Name.startswith("false"))
        Imm = 6;
      else if (Name.startswith("true"))
        Imm = 7;
      else
        llvm_unreachable("Unknown condition");

      Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID);
      Rep =
          Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1),
                                     Builder.getInt8(Imm)});
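      // E.g. the old llvm.x86.xop.vpcomltb(%a, %b) becomes, roughly,
      //   call <16 x i8> @llvm.x86.xop.vpcomb(<16 x i8> %a, <16 x i8> %b, i8 0)
      // with the condition folded into the immediate (lt=0 ... true=7).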
    } else if (IsX86 && Name == "xop.vpcmov") {
      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);
      Value *Sel = CI->getArgOperand(2);
      unsigned NumElts = CI->getType()->getVectorNumElements();
      Constant *MinusOne =
          ConstantVector::getSplat(NumElts, Builder.getInt64(-1));
      Value *NotSel = Builder.CreateXor(Sel, MinusOne);
      Value *Sel0 = Builder.CreateAnd(Arg0, Sel);
      Value *Sel1 = Builder.CreateAnd(Arg1, NotSel);
      Rep = Builder.CreateOr(Sel0, Sel1);
    } else if (IsX86 && Name == "sse42.crc32.64.8") {
      Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
                                               Intrinsic::x86_sse42_crc32_32_8);
      Value *Trunc0 =
          Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
      Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
      Rep = Builder.CreateZExt(Rep, CI->getType(), "");
    } else if (IsX86 && Name.startswith("avx.vbroadcast.s")) {
      // Replace broadcasts with a series of insertelements.
      Type *VecTy = CI->getType();
      Type *EltTy = VecTy->getVectorElementType();
      unsigned EltNum = VecTy->getVectorNumElements();
      Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
                                          EltTy->getPointerTo());
      Value *Load = Builder.CreateLoad(EltTy, Cast);
      Type *I32Ty = Type::getInt32Ty(C);
      Rep = UndefValue::get(VecTy);
      for (unsigned I = 0; I < EltNum; ++I)
        Rep = Builder.CreateInsertElement(Rep, Load,
                                          ConstantInt::get(I32Ty, I));
    } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
                         Name.startswith("sse41.pmovzx") ||
                         Name.startswith("avx2.pmovsx") ||
                         Name.startswith("avx2.pmovzx"))) {
      VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
      VectorType *DstTy = cast<VectorType>(CI->getType());
      unsigned NumDstElts = DstTy->getNumElements();

      // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
      SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
      for (unsigned i = 0; i != NumDstElts; ++i)
        ShuffleMask[i] = i;

      Value *SV = Builder.CreateShuffleVector(
          CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask);

      bool DoSext = (StringRef::npos != Name.find("pmovsx"));
      Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
                   : Builder.CreateZExt(SV, DstTy);
    } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
                         Name == "avx2.vbroadcasti128")) {
      // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
      Type *EltTy = CI->getType()->getVectorElementType();
      unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
      Type *VT = VectorType::get(EltTy, NumSrcElts);
      Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
                                            PointerType::getUnqual(VT));
      Value *Load = Builder.CreateAlignedLoad(Op, 1);
      if (NumSrcElts == 2)
        Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
                                          { 0, 1, 0, 1 });
      else
        Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
                                          { 0, 1, 2, 3, 0, 1, 2, 3 });
    } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
                         Name.startswith("avx2.vbroadcast") ||
                         Name.startswith("avx512.pbroadcast") ||
                         Name.startswith("avx512.mask.broadcast.s"))) {
      // Replace vp?broadcasts with a vector shuffle.
      Value *Op = CI->getArgOperand(0);
      unsigned NumElts = CI->getType()->getVectorNumElements();
      Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
      Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
                                        Constant::getNullValue(MaskTy));

      if (CI->getNumArgOperands() == 3)
        Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                            CI->getArgOperand(1));
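      // The zero shuffle mask splats element 0; e.g. with a <4 x i32> source
      // and an <8 x i32> result, every output lane reads lane 0 of Op.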
    } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
      Rep = UpgradeX86PALIGNRIntrinsics(Builder, CI->getArgOperand(0),
                                        CI->getArgOperand(1),
                                        CI->getArgOperand(2),
                                        CI->getArgOperand(3),
                                        CI->getArgOperand(4));
    } else if (IsX86 && (Name == "sse2.psll.dq" ||
                         Name == "avx2.psll.dq")) {
      // 128/256-bit shift left specified in bits.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
                                       Shift / 8); // Shift is in bits.
    } else if (IsX86 && (Name == "sse2.psrl.dq" ||
                         Name == "avx2.psrl.dq")) {
      // 128/256-bit shift right specified in bits.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
                                       Shift / 8); // Shift is in bits.
    } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
                         Name == "avx2.psll.dq.bs" ||
                         Name == "avx512.psll.dq.512")) {
      // 128/256/512-bit shift left specified in bytes.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
    } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
                         Name == "avx2.psrl.dq.bs" ||
                         Name == "avx512.psrl.dq.512")) {
      // 128/256/512-bit shift right specified in bytes.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
    } else if (IsX86 && (Name == "sse41.pblendw" ||
                         Name.startswith("sse41.blendp") ||
                         Name.startswith("avx.blend.p") ||
                         Name == "avx2.pblendw" ||
                         Name.startswith("avx2.pblendd."))) {
      Value *Op0 = CI->getArgOperand(0);
      Value *Op1 = CI->getArgOperand(1);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
      VectorType *VecTy = cast<VectorType>(CI->getType());
      unsigned NumElts = VecTy->getNumElements();

      SmallVector<uint32_t, 16> Idxs(NumElts);
      for (unsigned i = 0; i != NumElts; ++i)
        Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;

      Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
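      // Worked example: sse41.pblendw on <8 x i16> with Imm == 0x0F selects
      // the low four elements from Op1 and the rest from Op0, i.e. indices
      // <8, 9, 10, 11, 4, 5, 6, 7>.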
    } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
                         Name == "avx2.vinserti128")) {
      Value *Op0 = CI->getArgOperand(0);
      Value *Op1 = CI->getArgOperand(1);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
      VectorType *VecTy = cast<VectorType>(CI->getType());
      unsigned NumElts = VecTy->getNumElements();

      // Mask off the high bits of the immediate value; hardware ignores those.
      Imm = Imm & 1;

      // Extend the second operand into a vector that is twice as big.
      Value *UndefV = UndefValue::get(Op1->getType());
      SmallVector<uint32_t, 8> Idxs(NumElts);
      for (unsigned i = 0; i != NumElts; ++i)
        Idxs[i] = i;
      Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);

      // Insert the second operand into the first operand.

      // Note that there is no guarantee that instruction lowering will actually
      // produce a vinsertf128 instruction for the created shuffles. In
      // particular, the 0 immediate case involves no lane changes, so it can
      // be handled as a blend.

      // Example of shuffle mask for 32-bit elements:
      // Imm = 1  <i32 0, i32 1, i32 2,  i32 3,  i32 8, i32 9, i32 10, i32 11>
      // Imm = 0  <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6,  i32 7 >

      // The low half of the result is either the low half of the 1st operand
      // or the low half of the 2nd operand (the inserted vector).
      for (unsigned i = 0; i != NumElts / 2; ++i)
        Idxs[i] = Imm ? i : (i + NumElts);
      // The high half of the result is either the low half of the 2nd operand
      // (the inserted vector) or the high half of the 1st operand.
      for (unsigned i = NumElts / 2; i != NumElts; ++i)
        Idxs[i] = Imm ? (i + NumElts / 2) : i;
      Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
    } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
                         Name == "avx2.vextracti128")) {
      Value *Op0 = CI->getArgOperand(0);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      VectorType *VecTy = cast<VectorType>(CI->getType());
      unsigned NumElts = VecTy->getNumElements();

      // Mask off the high bits of the immediate value; hardware ignores those.
      Imm = Imm & 1;

      // Get indexes for either the high half or low half of the input vector.
      SmallVector<uint32_t, 4> Idxs(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        Idxs[i] = Imm ? (i + NumElts) : i;
      }

      Value *UndefV = UndefValue::get(Op0->getType());
      Rep = Builder.CreateShuffleVector(Op0, UndefV, Idxs);
    } else if (!IsX86 && Name == "stackprotectorcheck") {
      Rep = nullptr;
    } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
                         Name.startswith("avx512.mask.perm.di."))) {
      Value *Op0 = CI->getArgOperand(0);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      VectorType *VecTy = cast<VectorType>(CI->getType());
      unsigned NumElts = VecTy->getNumElements();

      SmallVector<uint32_t, 8> Idxs(NumElts);
      for (unsigned i = 0; i != NumElts; ++i)
        Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);

      Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

      if (CI->getNumArgOperands() == 4)
        Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                            CI->getArgOperand(2));
    } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
                         Name == "sse2.pshuf.d" ||
                         Name.startswith("avx512.mask.vpermil.p") ||
                         Name.startswith("avx512.mask.pshuf.d."))) {
      Value *Op0 = CI->getArgOperand(0);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      VectorType *VecTy = cast<VectorType>(CI->getType());
      unsigned NumElts = VecTy->getNumElements();
      // Calculate the size of each index in the immediate.
      unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
      unsigned IdxMask = ((1 << IdxSize) - 1);

      SmallVector<uint32_t, 8> Idxs(NumElts);
      // Look up the bits for this element, wrapping around the immediate
      // every 8 bits. Elements are grouped into sets of 2 or 4 elements so
      // we need to offset by the first index of each group.
      for (unsigned i = 0; i != NumElts; ++i)
        Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);

      Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

      if (CI->getNumArgOperands() == 4)
        Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                            CI->getArgOperand(2));
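      // Worked example: sse2.pshuf.d on <4 x i32> with Imm == 0x1B
      // (0b00011011) yields indices <3, 2, 1, 0>, reversing the vector.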
    } else if (IsX86 && (Name == "sse2.pshufl.w" ||
                         Name.startswith("avx512.mask.pshufl.w."))) {
      Value *Op0 = CI->getArgOperand(0);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      unsigned NumElts = CI->getType()->getVectorNumElements();

      SmallVector<uint32_t, 16> Idxs(NumElts);
      for (unsigned l = 0; l != NumElts; l += 8) {
        for (unsigned i = 0; i != 4; ++i)
          Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
        for (unsigned i = 4; i != 8; ++i)
          Idxs[i + l] = i + l;
      }

      Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

      if (CI->getNumArgOperands() == 4)
        Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                            CI->getArgOperand(2));
    } else if (IsX86 && (Name == "sse2.pshufh.w" ||
                         Name.startswith("avx512.mask.pshufh.w."))) {
      Value *Op0 = CI->getArgOperand(0);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      unsigned NumElts = CI->getType()->getVectorNumElements();

      SmallVector<uint32_t, 16> Idxs(NumElts);
      for (unsigned l = 0; l != NumElts; l += 8) {
        for (unsigned i = 0; i != 4; ++i)
          Idxs[i + l] = i + l;
        for (unsigned i = 0; i != 4; ++i)
          Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
      }

      Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

      if (CI->getNumArgOperands() == 4)
        Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                            CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
      Value *Op0 = CI->getArgOperand(0);
      Value *Op1 = CI->getArgOperand(1);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
      unsigned NumElts = CI->getType()->getVectorNumElements();

      unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
      unsigned HalfLaneElts = NumLaneElts / 2;

      SmallVector<uint32_t, 16> Idxs(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        // Base index is the starting element of the lane.
        Idxs[i] = i - (i % NumLaneElts);
        // If we are halfway through the lane, switch to the other source.
        if ((i % NumLaneElts) >= HalfLaneElts)
          Idxs[i] += NumElts;
        // Now select the specific element by adding HalfLaneElts bits from
        // the immediate, wrapping around the immediate every 8 bits.
        Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
1174       }
1175 
1176       Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
1177 
1178       Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
1179                           CI->getArgOperand(3));
1180     } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
1181                          Name.startswith("avx512.mask.movshdup") ||
1182                          Name.startswith("avx512.mask.movsldup"))) {
1183       Value *Op0 = CI->getArgOperand(0);
1184       unsigned NumElts = CI->getType()->getVectorNumElements();
1185       unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
1186 
1187       unsigned Offset = 0;
1188       if (Name.startswith("avx512.mask.movshdup."))
1189         Offset = 1;
1190 
1191       SmallVector<uint32_t, 16> Idxs(NumElts);
1192       for (unsigned l = 0; l != NumElts; l += NumLaneElts)
1193         for (unsigned i = 0; i != NumLaneElts; i += 2) {
1194           Idxs[i + l + 0] = i + l + Offset;
1195           Idxs[i + l + 1] = i + l + Offset;
1196         }
1197 
1198       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
1199 
1200       Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1201                           CI->getArgOperand(1));
1202     } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
1203                          Name.startswith("avx512.mask.unpckl."))) {
1204       Value *Op0 = CI->getArgOperand(0);
1205       Value *Op1 = CI->getArgOperand(1);
1206       int NumElts = CI->getType()->getVectorNumElements();
1207       int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
1208 
1209       SmallVector<uint32_t, 64> Idxs(NumElts);
      for (int l = 0; l != NumElts; l += NumLaneElts)
        for (int i = 0; i != NumLaneElts; ++i)
          Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);

      Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);

      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
                         Name.startswith("avx512.mask.unpckh."))) {
      Value *Op0 = CI->getArgOperand(0);
      Value *Op1 = CI->getArgOperand(1);
      int NumElts = CI->getType()->getVectorNumElements();
      int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();

      SmallVector<uint32_t, 64> Idxs(NumElts);
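      // Interleave the high halves of each 128-bit lane of the two sources.
      // E.g. a v4i32 unpack-high yields the indices {2, 6, 3, 7}.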
      for (int l = 0; l != NumElts; l += NumLaneElts)
        for (int i = 0; i != NumLaneElts; ++i)
          Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);

      Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);

      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.pand.")) {
      Rep = Builder.CreateAnd(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.pandn.")) {
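      // PANDN ANDs the bitwise negation of its first operand with the
      // second operand.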
      Rep = Builder.CreateAnd(Builder.CreateNot(CI->getArgOperand(0)),
                              CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.por.")) {
      Rep = Builder.CreateOr(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.pxor.")) {
      Rep = Builder.CreateXor(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.and.")) {
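      // IR has no bitwise operations on floating-point vectors, so this and
      // the following andn/or/xor cases bitcast to the equivalent integer
      // type, perform the operation, and bitcast the result back.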
      VectorType *FTy = cast<VectorType>(CI->getType());
      VectorType *ITy = VectorType::getInteger(FTy);
      Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                              Builder.CreateBitCast(CI->getArgOperand(1), ITy));
      Rep = Builder.CreateBitCast(Rep, FTy);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.andn.")) {
      VectorType *FTy = cast<VectorType>(CI->getType());
      VectorType *ITy = VectorType::getInteger(FTy);
      Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
      Rep = Builder.CreateAnd(Rep,
                              Builder.CreateBitCast(CI->getArgOperand(1), ITy));
      Rep = Builder.CreateBitCast(Rep, FTy);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.or.")) {
      VectorType *FTy = cast<VectorType>(CI->getType());
      VectorType *ITy = VectorType::getInteger(FTy);
      Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                             Builder.CreateBitCast(CI->getArgOperand(1), ITy));
      Rep = Builder.CreateBitCast(Rep, FTy);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.xor.")) {
      VectorType *FTy = cast<VectorType>(CI->getType());
      VectorType *ITy = VectorType::getInteger(FTy);
      Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                              Builder.CreateBitCast(CI->getArgOperand(1), ITy));
      Rep = Builder.CreateBitCast(Rep, FTy);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
      Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
      Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
      Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && (Name.startswith("avx512.mask.add.pd.128") ||
                         Name.startswith("avx512.mask.add.pd.256") ||
                         Name.startswith("avx512.mask.add.ps.128") ||
                         Name.startswith("avx512.mask.add.ps.256"))) {
      Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && (Name.startswith("avx512.mask.div.pd.128") ||
                         Name.startswith("avx512.mask.div.pd.256") ||
                         Name.startswith("avx512.mask.div.ps.128") ||
                         Name.startswith("avx512.mask.div.ps.256"))) {
      Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && (Name.startswith("avx512.mask.mul.pd.128") ||
                         Name.startswith("avx512.mask.mul.pd.256") ||
                         Name.startswith("avx512.mask.mul.ps.128") ||
                         Name.startswith("avx512.mask.mul.ps.256"))) {
      Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else if (IsX86 && (Name.startswith("avx512.mask.sub.pd.128") ||
                         Name.startswith("avx512.mask.sub.pd.256") ||
                         Name.startswith("avx512.mask.sub.ps.128") ||
                         Name.startswith("avx512.mask.sub.ps.256"))) {
      Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    } else {
      llvm_unreachable("Unknown function for CallInst upgrade.");
    }

    if (Rep)
      CI->replaceAllUsesWith(Rep);
    CI->eraseFromParent();
    return;
  }

  std::string Name = CI->getName();
  if (!Name.empty())
    CI->setName(Name + ".old");

  switch (NewFn->getIntrinsicID()) {
  default:
    llvm_unreachable("Unknown function for CallInst upgrade.");

  case Intrinsic::x86_avx512_mask_psll_di_512:
  case Intrinsic::x86_avx512_mask_psra_di_512:
  case Intrinsic::x86_avx512_mask_psrl_di_512:
  case Intrinsic::x86_avx512_mask_psll_qi_512:
  case Intrinsic::x86_avx512_mask_psra_qi_512:
  case Intrinsic::x86_avx512_mask_psrl_qi_512:
  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());
    CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
    CI->eraseFromParent();
    return;
  }

  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    assert(CI->getNumArgOperands() == 1 &&
           "Mismatch between function args and call args");
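    // The upgraded llvm.ctlz/llvm.cttz take an explicit i1 is_zero_undef
    // flag; pass false to preserve the old behavior of being defined at
    // zero.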
    CI->replaceAllUsesWith(Builder.CreateCall(
        NewFn, {CI->getArgOperand(0), Builder.getFalse()}, Name));
    CI->eraseFromParent();
    return;

  case Intrinsic::objectsize:
    CI->replaceAllUsesWith(Builder.CreateCall(
        NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)}, Name));
    CI->eraseFromParent();
    return;

  case Intrinsic::ctpop: {
    CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {CI->getArgOperand(0)}));
    CI->eraseFromParent();
    return;
  }

  case Intrinsic::x86_xop_vfrcz_ss:
  case Intrinsic::x86_xop_vfrcz_sd:
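    // Only the second operand of the old two-operand form is meaningful;
    // the new declaration takes a single source operand.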
    CI->replaceAllUsesWith(
        Builder.CreateCall(NewFn, {CI->getArgOperand(1)}, Name));
    CI->eraseFromParent();
    return;

  case Intrinsic::x86_xop_vpermil2pd:
  case Intrinsic::x86_xop_vpermil2ps:
  case Intrinsic::x86_xop_vpermil2pd_256:
  case Intrinsic::x86_xop_vpermil2ps_256: {
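    // The selector operand changed from a floating-point vector to an
    // integer vector of the same width; bitcast it to match the new
    // signature.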
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());
    VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
    VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
    Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
    CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args, Name));
    CI->eraseFromParent();
    return;
  }

  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestnzc: {
    // The arguments for these intrinsics used to be v4f32 and changed to
    // v2i64. This is purely a no-op, since these are bitwise intrinsics, so
    // all that is required is a bitcast of both arguments.
    // First, check whether the arguments have the old type.
    Value *Arg0 = CI->getArgOperand(0);
    if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
      return;

    // Old intrinsic; add bitcasts.
    Value *Arg1 = CI->getArgOperand(1);

    Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);

    Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
    Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");

    CallInst *NewCall = Builder.CreateCall(NewFn, {BC0, BC1}, Name);
    CI->replaceAllUsesWith(NewCall);
    CI->eraseFromParent();
    return;
  }

  case Intrinsic::x86_sse41_insertps:
  case Intrinsic::x86_sse41_dppd:
  case Intrinsic::x86_sse41_dpps:
  case Intrinsic::x86_sse41_mpsadbw:
  case Intrinsic::x86_avx_dp_ps_256:
  case Intrinsic::x86_avx2_mpsadbw: {
    // Need to truncate the last argument from i32 to i8 -- this argument models
    // an inherently 8-bit immediate operand to these x86 instructions.
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());

    // Replace the last argument with a trunc.
    Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");

    CallInst *NewCall = Builder.CreateCall(NewFn, Args);
    CI->replaceAllUsesWith(NewCall);
    CI->eraseFromParent();
    return;
  }

  case Intrinsic::thread_pointer: {
    CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {}));
    CI->eraseFromParent();
    return;
  }

  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::masked_load:
  case Intrinsic::masked_store: {
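    // Only the intrinsic's declared type changed; the call arguments carry
    // over to the new declaration unchanged.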
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());
    CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
    CI->eraseFromParent();
    return;
  }
  }
}

void llvm::UpgradeCallsToIntrinsic(Function *F) {
  assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");

  // Check if this function should be upgraded and get the replacement function
  // if there is one.
  Function *NewFn;
  if (UpgradeIntrinsicFunction(F, NewFn)) {
    // Replace all users of the old function with the new function or new
    // instructions. This is not a range loop because the call is deleted.
    for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; )
      if (CallInst *CI = dyn_cast<CallInst>(*UI++))
        UpgradeIntrinsicCall(CI, NewFn);

    // Remove the old function from the module; it is no longer used.
    F->eraseFromParent();
  }
}

MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
  // Check if the tag uses struct-path aware TBAA format.
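  // Struct-path access tags lead with an MDNode (the base type) and have at
  // least three operands; old-style scalar tags lead with an MDString
  // naming the type.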
  if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
    return &MD;

  auto &Context = MD.getContext();
  if (MD.getNumOperands() == 3) {
    Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
    MDNode *ScalarType = MDNode::get(Context, Elts);
    // Create an MDNode <ScalarType, ScalarType, offset 0, const>
    Metadata *Elts2[] = {ScalarType, ScalarType,
                         ConstantAsMetadata::get(
                             Constant::getNullValue(Type::getInt64Ty(Context))),
                         MD.getOperand(2)};
    return MDNode::get(Context, Elts2);
  }
  // Create an MDNode <MD, MD, offset 0>
  Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
                                    Type::getInt64Ty(Context)))};
  return MDNode::get(Context, Elts);
}

Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
                                      Instruction *&Temp) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Temp = nullptr;
  Type *SrcTy = V->getType();
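  // Older bitcode allowed bitcasts between pointers in different address
  // spaces; rewrite such a cast as a ptrtoint/inttoptr pair.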
  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
    LLVMContext &Context = V->getContext();

    // We have no information about the target data layout, so we assume that
    // the maximum pointer size is 64 bits.
    Type *MidTy = Type::getInt64Ty(Context);
    Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);

    return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
  }

  return nullptr;
}

Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Type *SrcTy = C->getType();
  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
    LLVMContext &Context = C->getContext();

    // We have no information about the target data layout, so we assume that
    // the maximum pointer size is 64 bits.
    Type *MidTy = Type::getInt64Ty(Context);

    return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
                                     DestTy);
  }

  return nullptr;
}

/// Check the debug info version number; if it is outdated, drop the debug
/// info. Return true if the module is modified.
bool llvm::UpgradeDebugInfo(Module &M) {
  unsigned Version = getDebugMetadataVersionFromModule(M);
  if (Version == DEBUG_METADATA_VERSION)
    return false;

  bool RetCode = StripDebugInfo(M);
  if (RetCode) {
    DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
    M.getContext().diagnose(DiagVersion);
  }
  return RetCode;
}

bool llvm::UpgradeModuleFlags(Module &M) {
  const NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
  if (!ModFlags)
    return false;

  bool HasObjCFlag = false, HasClassProperties = false;
  for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
    MDNode *Op = ModFlags->getOperand(I);
    if (Op->getNumOperands() < 2)
      continue;
    MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
    if (!ID)
      continue;
    if (ID->getString() == "Objective-C Image Info Version")
      HasObjCFlag = true;
    if (ID->getString() == "Objective-C Class Properties")
      HasClassProperties = true;
  }
  // "Objective-C Class Properties" is a recently added module flag for
  // Objective-C. We upgrade ObjC bitcode to contain an "Objective-C Class
  // Properties" module flag with value 0, so that the flag can be correctly
  // downgraded when linking an ObjC bitcode that lacks the flag against one
  // that has it.
  if (HasObjCFlag && !HasClassProperties) {
    M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
                    (uint32_t)0);
    return true;
  }
  return false;
}

static bool isOldLoopArgument(Metadata *MD) {
  auto *T = dyn_cast_or_null<MDTuple>(MD);
  if (!T)
    return false;
  if (T->getNumOperands() < 1)
    return false;
  auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
  if (!S)
    return false;
  return S->getString().startswith("llvm.vectorizer.");
}

static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
  StringRef OldPrefix = "llvm.vectorizer.";
  assert(OldTag.startswith(OldPrefix) && "Expected old prefix");
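  // "llvm.vectorizer.unroll" maps to "llvm.loop.interleave.count"; every
  // other "llvm.vectorizer.*" tag becomes the corresponding
  // "llvm.loop.vectorize.*" tag, e.g. "llvm.vectorizer.width" becomes
  // "llvm.loop.vectorize.width".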
  if (OldTag == "llvm.vectorizer.unroll")
    return MDString::get(C, "llvm.loop.interleave.count");

  return MDString::get(
      C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
             .str());
}

static Metadata *upgradeLoopArgument(Metadata *MD) {
  auto *T = dyn_cast_or_null<MDTuple>(MD);
  if (!T)
    return MD;
  if (T->getNumOperands() < 1)
    return MD;
  auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
  if (!OldTag)
    return MD;
  if (!OldTag->getString().startswith("llvm.vectorizer."))
    return MD;

  // This has an old tag.  Upgrade it.
  SmallVector<Metadata *, 8> Ops;
  Ops.reserve(T->getNumOperands());
  Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
  for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
    Ops.push_back(T->getOperand(I));

  return MDTuple::get(T->getContext(), Ops);
}

MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
  auto *T = dyn_cast<MDTuple>(&N);
  if (!T)
    return &N;

  if (none_of(T->operands(), isOldLoopArgument))
    return &N;

  SmallVector<Metadata *, 8> Ops;
  Ops.reserve(T->getNumOperands());
  for (Metadata *MD : T->operands())
    Ops.push_back(upgradeLoopArgument(MD));

  return MDTuple::get(T->getContext(), Ops);
}