1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the auto-upgrade helper functions.
11 // This is where deprecated IR intrinsics and other IR features are updated to
12 // current specifications.
13 //
14 //===----------------------------------------------------------------------===//
15 
16 #include "llvm/IR/AutoUpgrade.h"
17 #include "llvm/IR/CFG.h"
18 #include "llvm/IR/CallSite.h"
19 #include "llvm/IR/Constants.h"
20 #include "llvm/IR/DIBuilder.h"
21 #include "llvm/IR/DebugInfo.h"
22 #include "llvm/IR/DiagnosticInfo.h"
23 #include "llvm/IR/Function.h"
24 #include "llvm/IR/IRBuilder.h"
25 #include "llvm/IR/Instruction.h"
26 #include "llvm/IR/IntrinsicInst.h"
27 #include "llvm/IR/LLVMContext.h"
28 #include "llvm/IR/Module.h"
29 #include "llvm/Support/ErrorHandling.h"
30 #include "llvm/Support/Regex.h"
31 #include <cstring>
32 using namespace llvm;
33 
34 static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }
35 
36 // Upgrade the declarations of the SSE4.1 functions whose arguments have
37 // changed their type from v4f32 to v2i64.
38 static bool UpgradeSSE41Function(Function* F, Intrinsic::ID IID,
39                                  Function *&NewFn) {
40   // Check whether this is an old version of the function, which received
41   // v4f32 arguments.
42   Type *Arg0Type = F->getFunctionType()->getParamType(0);
43   if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
44     return false;
45 
46   // Yes, it's old, replace it with new version.
47   rename(F);
48   NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
49   return true;
50 }
51 
52 // Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
53 // arguments have changed their type from i32 to i8.
54 static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
55                                              Function *&NewFn) {
56   // Check that the last argument is an i32.
57   Type *LastArgType = F->getFunctionType()->getParamType(
58      F->getFunctionType()->getNumParams() - 1);
59   if (!LastArgType->isIntegerTy(32))
60     return false;
61 
62   // Move this function aside and map down.
63   rename(F);
64   NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
65   return true;
66 }
67 
// Decide whether the intrinsic declaration F is deprecated and needs
// upgrading. Returns true if so; NewFn then holds the replacement
// declaration, or is left null when only the call sites themselves must be
// rewritten by the caller (no new declaration is needed).
static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  // Dispatch on the first character after the "llvm." prefix.
  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.neon.vclz")) {
      Type* args[2] = {
        F->arg_begin()->getType(),
        Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      //  llvm.ctlz.*
      FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    if (Name.startswith("arm.neon.vcnt")) {
      // vcnt maps directly onto the generic population-count intrinsic.
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    // NEON loads gained an explicit pointer-type suffix (".p0i8").
    Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(),
                               "llvm." + Name + ".p0i8", F->getParent());
      return true;
    }
    // NEON stores: re-declare with explicit pointer + element type overloads.
    Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vstRegex.match(Name)) {
      static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
                                                Intrinsic::arm_neon_vst2,
                                                Intrinsic::arm_neon_vst3,
                                                Intrinsic::arm_neon_vst4};

      static const Intrinsic::ID StoreLaneInts[] = {
        Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
        Intrinsic::arm_neon_vst4lane
      };

      auto fArgs = F->getFunctionType()->params();
      Type *Tys[] = {fArgs[0], fArgs[1]};
      // The arg count identifies which vstN/vstNlane variant this is:
      // vstN takes N+2 args, vstNlane takes N+3.
      if (Name.find("lane") == StringRef::npos)
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreInts[fArgs.size() - 3], Tys);
      else
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreLaneInts[fArgs.size() - 5], Tys);
      return true;
    }
    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
      return true;
    }
    break;
  }

  case 'c': {
    // ctlz/cttz grew a second "is_zero_undef" i1 argument; single-arg
    // declarations are the old form.
    if (Name.startswith("ctlz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("cttz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
                                        F->arg_begin()->getType());
      return true;
    }
    break;
  }
  case 'i': {
    // invariant.start/end became overloaded on the object pointer type;
    // re-declare when the mangled name doesn't match the current scheme.
    if (Name.startswith("invariant.start")) {
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[1]};
      if (F->getName() !=
          Intrinsic::getName(Intrinsic::invariant_start, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(
            F->getParent(), Intrinsic::invariant_start, ObjectPtr);
        return true;
      }
    }
    if (Name.startswith("invariant.end")) {
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[2]};
      if (F->getName() !=
          Intrinsic::getName(Intrinsic::invariant_end, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::invariant_end, ObjectPtr);
        return true;
      }
    }
    break;
  }
  case 'm': {
    // masked.load/store changed their overload mangling; re-declare when
    // the existing name differs from the current mangled name.
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    break;
  }

  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (F->arg_size() == 2 && Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::objectsize, Tys);
        return true;
      }
    }
    break;

  case 's':
    // Removed intrinsic: no replacement declaration; the caller rewrites
    // or drops the call sites.
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x': {
    bool IsX86 = Name.startswith("x86.");
    if (IsX86)
      Name = Name.substr(4);

    // All of the intrinsics matches below should be marked with which llvm
    // version started autoupgrading them. At some point in the future we would
    // like to use this information to remove upgrade code for some older
    // intrinsics. It is currently undecided how we will determine that future
    // point.
    // Every intrinsic in this list is upgraded at the call site (NewFn stays
    // null); these names have no direct replacement declaration.
    if (IsX86 &&
        (Name.startswith("sse2.pcmpeq.") || // Added in 3.1
         Name.startswith("sse2.pcmpgt.") || // Added in 3.1
         Name.startswith("avx2.pcmpeq.") || // Added in 3.1
         Name.startswith("avx2.pcmpgt.") || // Added in 3.1
         Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
         Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
         Name == "sse.add.ss" || // Added in 4.0
         Name == "sse2.add.sd" || // Added in 4.0
         Name == "sse.sub.ss" || // Added in 4.0
         Name == "sse2.sub.sd" || // Added in 4.0
         Name == "sse.mul.ss" || // Added in 4.0
         Name == "sse2.mul.sd" || // Added in 4.0
         Name == "sse.div.ss" || // Added in 4.0
         Name == "sse2.div.sd" || // Added in 4.0
         Name == "sse41.pmaxsb" || // Added in 3.9
         Name == "sse2.pmaxs.w" || // Added in 3.9
         Name == "sse41.pmaxsd" || // Added in 3.9
         Name == "sse2.pmaxu.b" || // Added in 3.9
         Name == "sse41.pmaxuw" || // Added in 3.9
         Name == "sse41.pmaxud" || // Added in 3.9
         Name == "sse41.pminsb" || // Added in 3.9
         Name == "sse2.pmins.w" || // Added in 3.9
         Name == "sse41.pminsd" || // Added in 3.9
         Name == "sse2.pminu.b" || // Added in 3.9
         Name == "sse41.pminuw" || // Added in 3.9
         Name == "sse41.pminud" || // Added in 3.9
         Name == "avx512.mask.pshuf.b.128" || // Added in 4.0
         Name == "avx512.mask.pshuf.b.256" || // Added in 4.0
         Name.startswith("avx2.pmax") || // Added in 3.9
         Name.startswith("avx2.pmin") || // Added in 3.9
         Name.startswith("avx512.mask.pmax") || // Added in 4.0
         Name.startswith("avx512.mask.pmin") || // Added in 4.0
         Name.startswith("avx2.vbroadcast") || // Added in 3.8
         Name.startswith("avx2.pbroadcast") || // Added in 3.8
         Name.startswith("avx.vpermil.") || // Added in 3.1
         Name.startswith("sse2.pshuf") || // Added in 3.9
         Name.startswith("avx512.pbroadcast") || // Added in 3.9
         Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
         Name.startswith("avx512.mask.movddup") || // Added in 3.9
         Name.startswith("avx512.mask.movshdup") || // Added in 3.9
         Name.startswith("avx512.mask.movsldup") || // Added in 3.9
         Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
         Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
         Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
         Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
         Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
         Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
         Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
         Name.startswith("avx512.mask.punpckl") || // Added in 3.9
         Name.startswith("avx512.mask.punpckh") || // Added in 3.9
         Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
         Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
         Name.startswith("avx512.mask.pand.") || // Added in 3.9
         Name.startswith("avx512.mask.pandn.") || // Added in 3.9
         Name.startswith("avx512.mask.por.") || // Added in 3.9
         Name.startswith("avx512.mask.pxor.") || // Added in 3.9
         Name.startswith("avx512.mask.and.") || // Added in 3.9
         Name.startswith("avx512.mask.andn.") || // Added in 3.9
         Name.startswith("avx512.mask.or.") || // Added in 3.9
         Name.startswith("avx512.mask.xor.") || // Added in 3.9
         Name.startswith("avx512.mask.padd.") || // Added in 4.0
         Name.startswith("avx512.mask.psub.") || // Added in 4.0
         Name.startswith("avx512.mask.pmull.") || // Added in 4.0
         Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
         Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
         Name == "avx512.mask.add.pd.128" || // Added in 4.0
         Name == "avx512.mask.add.pd.256" || // Added in 4.0
         Name == "avx512.mask.add.ps.128" || // Added in 4.0
         Name == "avx512.mask.add.ps.256" || // Added in 4.0
         Name == "avx512.mask.div.pd.128" || // Added in 4.0
         Name == "avx512.mask.div.pd.256" || // Added in 4.0
         Name == "avx512.mask.div.ps.128" || // Added in 4.0
         Name == "avx512.mask.div.ps.256" || // Added in 4.0
         Name == "avx512.mask.mul.pd.128" || // Added in 4.0
         Name == "avx512.mask.mul.pd.256" || // Added in 4.0
         Name == "avx512.mask.mul.ps.128" || // Added in 4.0
         Name == "avx512.mask.mul.ps.256" || // Added in 4.0
         Name == "avx512.mask.sub.pd.128" || // Added in 4.0
         Name == "avx512.mask.sub.pd.256" || // Added in 4.0
         Name == "avx512.mask.sub.ps.128" || // Added in 4.0
         Name == "avx512.mask.sub.ps.256" || // Added in 4.0
         Name.startswith("avx512.mask.psll.d") || // Added in 4.0
         Name.startswith("avx512.mask.psll.q") || // Added in 4.0
         Name.startswith("avx512.mask.psll.w") || // Added in 4.0
         Name.startswith("avx512.mask.psra.d") || // Added in 4.0
         Name.startswith("avx512.mask.psra.q") || // Added in 4.0
         Name.startswith("avx512.mask.psra.w") || // Added in 4.0
         Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
         Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
         Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
         Name.startswith("avx512.mask.pslli") || // Added in 4.0
         Name.startswith("avx512.mask.psrai") || // Added in 4.0
         Name.startswith("avx512.mask.psrli") || // Added in 4.0
         Name == "avx512.mask.psllv2.di" || // Added in 4.0
         Name == "avx512.mask.psllv4.di" || // Added in 4.0
         Name == "avx512.mask.psllv4.si" || // Added in 4.0
         Name == "avx512.mask.psllv8.si" || // Added in 4.0
         Name == "avx512.mask.psrav4.si" || // Added in 4.0
         Name == "avx512.mask.psrav8.si" || // Added in 4.0
         Name == "avx512.mask.psrlv2.di" || // Added in 4.0
         Name == "avx512.mask.psrlv4.di" || // Added in 4.0
         Name == "avx512.mask.psrlv4.si" || // Added in 4.0
         Name == "avx512.mask.psrlv8.si" || // Added in 4.0
         Name.startswith("avx512.mask.psllv.") || // Added in 4.0
         Name.startswith("avx512.mask.psrav.") || // Added in 4.0
         Name.startswith("avx512.mask.psrlv.") || // Added in 4.0
         Name.startswith("sse41.pmovsx") || // Added in 3.8
         Name.startswith("sse41.pmovzx") || // Added in 3.9
         Name.startswith("avx2.pmovsx") || // Added in 3.9
         Name.startswith("avx2.pmovzx") || // Added in 3.9
         Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
         Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
         Name == "sse2.cvtdq2pd" || // Added in 3.9
         Name == "sse2.cvtps2pd" || // Added in 3.9
         Name == "avx.cvtdq2.pd.256" || // Added in 3.9
         Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
         Name.startswith("avx.vinsertf128.") || // Added in 3.7
         Name == "avx2.vinserti128" || // Added in 3.7
         Name.startswith("avx.vextractf128.") || // Added in 3.7
         Name == "avx2.vextracti128" || // Added in 3.7
         Name.startswith("sse4a.movnt.") || // Added in 3.9
         Name.startswith("avx.movnt.") || // Added in 3.2
         Name.startswith("avx512.storent.") || // Added in 3.9
         Name == "sse2.storel.dq" || // Added in 3.9
         Name.startswith("sse.storeu.") || // Added in 3.9
         Name.startswith("sse2.storeu.") || // Added in 3.9
         Name.startswith("avx.storeu.") || // Added in 3.9
         Name.startswith("avx512.mask.storeu.") || // Added in 3.9
         Name.startswith("avx512.mask.store.p") || // Added in 3.9
         Name.startswith("avx512.mask.store.b.") || // Added in 3.9
         Name.startswith("avx512.mask.store.w.") || // Added in 3.9
         Name.startswith("avx512.mask.store.d.") || // Added in 3.9
         Name.startswith("avx512.mask.store.q.") || // Added in 3.9
         Name.startswith("avx512.mask.loadu.") || // Added in 3.9
         Name.startswith("avx512.mask.load.") || // Added in 3.9
         Name == "sse42.crc32.64.8" || // Added in 3.4
         Name.startswith("avx.vbroadcast.s") || // Added in 3.5
         Name.startswith("avx512.mask.palignr.") || // Added in 3.9
         Name.startswith("sse2.psll.dq") || // Added in 3.7
         Name.startswith("sse2.psrl.dq") || // Added in 3.7
         Name.startswith("avx2.psll.dq") || // Added in 3.7
         Name.startswith("avx2.psrl.dq") || // Added in 3.7
         Name.startswith("avx512.psll.dq") || // Added in 3.9
         Name.startswith("avx512.psrl.dq") || // Added in 3.9
         Name == "sse41.pblendw" || // Added in 3.7
         Name.startswith("sse41.blendp") || // Added in 3.7
         Name.startswith("avx.blend.p") || // Added in 3.7
         Name == "avx2.pblendw" || // Added in 3.7
         Name.startswith("avx2.pblendd.") || // Added in 3.7
         Name.startswith("avx.vbroadcastf128") || // Added in 4.0
         Name == "avx2.vbroadcasti128" || // Added in 3.7
         Name == "xop.vpcmov" || // Added in 3.8
         Name.startswith("avx512.mask.move.s") || // Added in 4.0
         (Name.startswith("xop.vpcom") && // Added in 3.2
          F->arg_size() == 2))) {
      NewFn = nullptr;
      return true;
    }
    // SSE4.1 ptest functions may have an old signature.
    if (IsX86 && Name.startswith("sse41.ptest")) { // Added in 3.2
      if (Name.substr(11) == "c")
        return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestc, NewFn);
      if (Name.substr(11) == "z")
        return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestz, NewFn);
      if (Name.substr(11) == "nzc")
        return UpgradeSSE41Function(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
    }
    // Several blend and other instructions with masks used the wrong number of
    // bits.
    if (IsX86 && Name == "sse41.insertps") // Added in 3.6
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
                                              NewFn);
    if (IsX86 && Name == "sse41.dppd") // Added in 3.6
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
                                              NewFn);
    if (IsX86 && Name == "sse41.dpps") // Added in 3.6
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
                                              NewFn);
    if (IsX86 && Name == "sse41.mpsadbw") // Added in 3.6
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
                                              NewFn);
    if (IsX86 && Name == "avx.dp.ps.256") // Added in 3.6
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
                                              NewFn);
    if (IsX86 && Name == "avx2.mpsadbw") // Added in 3.6
      return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
                                              NewFn);

    // frcz.ss/sd may need to have an argument dropped. Added in 3.2
    if (IsX86 && Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::x86_xop_vfrcz_ss);
      return true;
    }
    if (IsX86 && Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::x86_xop_vfrcz_sd);
      return true;
    }
    // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
    if (IsX86 && Name.startswith("xop.vpermil2")) { // Added in 3.9
      auto Params = F->getFunctionType()->params();
      auto Idx = Params[2];
      if (Idx->getScalarType()->isFloatingPointTy()) {
        rename(F);
        // Pick the intrinsic matching the index vector's element and total
        // width: 128/256-bit ps/pd variants.
        unsigned IdxSize = Idx->getPrimitiveSizeInBits();
        unsigned EltSize = Idx->getScalarSizeInBits();
        Intrinsic::ID Permil2ID;
        if (EltSize == 64 && IdxSize == 128)
          Permil2ID = Intrinsic::x86_xop_vpermil2pd;
        else if (EltSize == 32 && IdxSize == 128)
          Permil2ID = Intrinsic::x86_xop_vpermil2ps;
        else if (EltSize == 64 && IdxSize == 256)
          Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
        else
          Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
        NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
        return true;
      }
    }
    break;
  }
  }

  //  This may not belong here. This function is effectively being overloaded
  //  to both detect an intrinsic which needs upgrading, and to provide the
  //  upgraded form of the intrinsic. We should perhaps have two separate
  //  functions for this.
  return false;
}
467 
468 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
469   NewFn = nullptr;
470   bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
471   assert(F != NewFn && "Intrinsic function upgraded to the same function");
472 
473   // Upgrade intrinsic attributes.  This does not change the function.
474   if (NewFn)
475     F = NewFn;
476   if (Intrinsic::ID id = F->getIntrinsicID())
477     F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
478   return Upgraded;
479 }
480 
481 bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
482   // Nothing to do yet.
483   return false;
484 }
485 
486 // Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
487 // to byte shuffles.
488 static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
489                                          Value *Op, unsigned Shift) {
490   Type *ResultTy = Op->getType();
491   unsigned NumElts = ResultTy->getVectorNumElements() * 8;
492 
493   // Bitcast from a 64-bit element type to a byte element type.
494   Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
495   Op = Builder.CreateBitCast(Op, VecTy, "cast");
496 
497   // We'll be shuffling in zeroes.
498   Value *Res = Constant::getNullValue(VecTy);
499 
500   // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
501   // we'll just return the zero vector.
502   if (Shift < 16) {
503     uint32_t Idxs[64];
504     // 256/512-bit version is split into 2/4 16-byte lanes.
505     for (unsigned l = 0; l != NumElts; l += 16)
506       for (unsigned i = 0; i != 16; ++i) {
507         unsigned Idx = NumElts + i - Shift;
508         if (Idx < NumElts)
509           Idx -= NumElts - 16; // end of lane, switch operand.
510         Idxs[l + i] = Idx + l;
511       }
512 
513     Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
514   }
515 
516   // Bitcast back to a 64-bit element type.
517   return Builder.CreateBitCast(Res, ResultTy, "cast");
518 }
519 
520 // Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
521 // to byte shuffles.
522 static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
523                                          unsigned Shift) {
524   Type *ResultTy = Op->getType();
525   unsigned NumElts = ResultTy->getVectorNumElements() * 8;
526 
527   // Bitcast from a 64-bit element type to a byte element type.
528   Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
529   Op = Builder.CreateBitCast(Op, VecTy, "cast");
530 
531   // We'll be shuffling in zeroes.
532   Value *Res = Constant::getNullValue(VecTy);
533 
534   // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
535   // we'll just return the zero vector.
536   if (Shift < 16) {
537     uint32_t Idxs[64];
538     // 256/512-bit version is split into 2/4 16-byte lanes.
539     for (unsigned l = 0; l != NumElts; l += 16)
540       for (unsigned i = 0; i != 16; ++i) {
541         unsigned Idx = i + Shift;
542         if (Idx >= 16)
543           Idx += NumElts - 16; // end of lane, switch operand.
544         Idxs[l + i] = Idx + l;
545       }
546 
547     Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
548   }
549 
550   // Bitcast back to a 64-bit element type.
551   return Builder.CreateBitCast(Res, ResultTy, "cast");
552 }
553 
554 static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
555                             unsigned NumElts) {
556   llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
557                              cast<IntegerType>(Mask->getType())->getBitWidth());
558   Mask = Builder.CreateBitCast(Mask, MaskTy);
559 
560   // If we have less than 8 elements, then the starting mask was an i8 and
561   // we need to extract down to the right number of elements.
562   if (NumElts < 8) {
563     uint32_t Indices[4];
564     for (unsigned i = 0; i != NumElts; ++i)
565       Indices[i] = i;
566     Mask = Builder.CreateShuffleVector(Mask, Mask,
567                                        makeArrayRef(Indices, NumElts),
568                                        "extract");
569   }
570 
571   return Mask;
572 }
573 
574 static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
575                             Value *Op0, Value *Op1) {
576   // If the mask is all ones just emit the align operation.
577   if (const auto *C = dyn_cast<Constant>(Mask))
578     if (C->isAllOnesValue())
579       return Op0;
580 
581   Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
582   return Builder.CreateSelect(Mask, Op0, Op1);
583 }
584 
// Upgrade the masked PALIGNR intrinsics: concatenate Op1:Op0 per 128-bit
// lane, byte-shift right by the immediate Shift, then blend the result with
// Passthru under Mask.
static Value *UpgradeX86PALIGNRIntrinsics(IRBuilder<> &Builder,
                                          Value *Op0, Value *Op1, Value *Shift,
                                          Value *Passthru, Value *Mask) {
  // The shift amount is an immediate (constant) byte count.
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert(NumElts % 16 == 0);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that
  for (unsigned l = 0; l != NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (Idx >= 16)
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  // Op1 supplies the low bytes, Op0 the high bytes (indices >= NumElts
  // select from the second shuffle operand).
  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  // Apply the writemask: keep shuffled bytes where Mask is set, Passthru
  // elsewhere.
  return EmitX86Select(Builder, Mask, Align, Passthru);
}
623 
624 static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
625                                  Value *Ptr, Value *Data, Value *Mask,
626                                  bool Aligned) {
627   // Cast the pointer to the right type.
628   Ptr = Builder.CreateBitCast(Ptr,
629                               llvm::PointerType::getUnqual(Data->getType()));
630   unsigned Align =
631     Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;
632 
633   // If the mask is all ones just emit a regular store.
634   if (const auto *C = dyn_cast<Constant>(Mask))
635     if (C->isAllOnesValue())
636       return Builder.CreateAlignedStore(Data, Ptr, Align);
637 
638   // Convert the mask from an integer type to a vector of i1.
639   unsigned NumElts = Data->getType()->getVectorNumElements();
640   Mask = getX86MaskVec(Builder, Mask, NumElts);
641   return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
642 }
643 
644 static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
645                                 Value *Ptr, Value *Passthru, Value *Mask,
646                                 bool Aligned) {
647   // Cast the pointer to the right type.
648   Ptr = Builder.CreateBitCast(Ptr,
649                              llvm::PointerType::getUnqual(Passthru->getType()));
650   unsigned Align =
651     Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;
652 
653   // If the mask is all ones just emit a regular store.
654   if (const auto *C = dyn_cast<Constant>(Mask))
655     if (C->isAllOnesValue())
656       return Builder.CreateAlignedLoad(Ptr, Align);
657 
658   // Convert the mask from an integer type to a vector of i1.
659   unsigned NumElts = Passthru->getType()->getVectorNumElements();
660   Mask = getX86MaskVec(Builder, Mask, NumElts);
661   return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
662 }
663 
664 static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
665                                ICmpInst::Predicate Pred) {
666   Value *Op0 = CI.getArgOperand(0);
667   Value *Op1 = CI.getArgOperand(1);
668   Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
669   Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);
670 
671   if (CI.getNumArgOperands() == 4)
672     Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));
673 
674   return Res;
675 }
676 
// Lower a masked integer-compare intrinsic to an icmp whose i1 vector result
// is ANDed with the writemask and then packed into an integer mask.
static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();
  Value *Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));

  // Unless the mask is a known all-ones constant, AND it into the compare
  // so masked-off lanes read as zero.
  Value *Mask = CI.getArgOperand(2);
  const auto *C = dyn_cast<Constant>(Mask);
  if (!C || !C->isAllOnesValue())
    Cmp = Builder.CreateAnd(Cmp, getX86MaskVec(Builder, Mask, NumElts));

  // Pad narrow results out to 8 elements with zeroes (indices >= NumElts
  // select from the null second operand) so the bitcast yields an i8.
  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Cmp = Builder.CreateShuffleVector(Cmp,
                                      Constant::getNullValue(Cmp->getType()),
                                      Indices);
  }
  // Pack the <N x i1> into an iN (at least i8) integer mask.
  return Builder.CreateBitCast(Cmp, IntegerType::get(CI.getContext(),
                                                     std::max(NumElts, 8U)));
}
701 
702 // Replace a masked intrinsic with an older unmasked intrinsic.
703 static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
704                                     Intrinsic::ID IID) {
705   Function *F = CI.getCalledFunction();
706   Function *Intrin = Intrinsic::getDeclaration(F->getParent(), IID);
707   Value *Rep = Builder.CreateCall(Intrin,
708                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
709   return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
710 }
711 
712 static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
713   Value* A = CI.getArgOperand(0);
714   Value* B = CI.getArgOperand(1);
715   Value* Src = CI.getArgOperand(2);
716   Value* Mask = CI.getArgOperand(3);
717 
718   Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
719   Value* Cmp = Builder.CreateIsNotNull(AndNode);
720   Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
721   Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
722   Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
723   return Builder.CreateInsertElement(A, Select, (uint64_t)0);
724 }
725 
726 /// Upgrade a call to an old intrinsic. All argument and return casting must be
727 /// provided to seamlessly integrate with existing context.
728 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
729   Function *F = CI->getCalledFunction();
730   LLVMContext &C = CI->getContext();
731   IRBuilder<> Builder(C);
732   Builder.SetInsertPoint(CI->getParent(), CI->getIterator());
733 
734   assert(F && "Intrinsic call is not direct?");
735 
736   if (!NewFn) {
737     // Get the Function's name.
738     StringRef Name = F->getName();
739 
740     assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
741     Name = Name.substr(5);
742 
743     bool IsX86 = Name.startswith("x86.");
744     if (IsX86)
745       Name = Name.substr(4);
746 
747     Value *Rep;
748     // Upgrade packed integer vector compare intrinsics to compare instructions.
749     if (IsX86 && (Name.startswith("sse2.pcmpeq.") ||
750                   Name.startswith("avx2.pcmpeq."))) {
751       Rep = Builder.CreateICmpEQ(CI->getArgOperand(0), CI->getArgOperand(1),
752                                  "pcmpeq");
753       Rep = Builder.CreateSExt(Rep, CI->getType(), "");
754     } else if (IsX86 && (Name.startswith("sse2.pcmpgt.") ||
755                          Name.startswith("avx2.pcmpgt."))) {
756       Rep = Builder.CreateICmpSGT(CI->getArgOperand(0), CI->getArgOperand(1),
757                                   "pcmpgt");
758       Rep = Builder.CreateSExt(Rep, CI->getType(), "");
759     } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd")) {
760       Type *I32Ty = Type::getInt32Ty(C);
761       Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
762                                                  ConstantInt::get(I32Ty, 0));
763       Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
764                                                  ConstantInt::get(I32Ty, 0));
765       Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
766                                         Builder.CreateFAdd(Elt0, Elt1),
767                                         ConstantInt::get(I32Ty, 0));
768     } else if (IsX86 && (Name == "sse.sub.ss" || Name == "sse2.sub.sd")) {
769       Type *I32Ty = Type::getInt32Ty(C);
770       Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
771                                                  ConstantInt::get(I32Ty, 0));
772       Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
773                                                  ConstantInt::get(I32Ty, 0));
774       Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
775                                         Builder.CreateFSub(Elt0, Elt1),
776                                         ConstantInt::get(I32Ty, 0));
777     } else if (IsX86 && (Name == "sse.mul.ss" || Name == "sse2.mul.sd")) {
778       Type *I32Ty = Type::getInt32Ty(C);
779       Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
780                                                  ConstantInt::get(I32Ty, 0));
781       Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
782                                                  ConstantInt::get(I32Ty, 0));
783       Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
784                                         Builder.CreateFMul(Elt0, Elt1),
785                                         ConstantInt::get(I32Ty, 0));
786     } else if (IsX86 && (Name == "sse.div.ss" || Name == "sse2.div.sd")) {
787       Type *I32Ty = Type::getInt32Ty(C);
788       Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
789                                                  ConstantInt::get(I32Ty, 0));
790       Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
791                                                  ConstantInt::get(I32Ty, 0));
792       Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
793                                         Builder.CreateFDiv(Elt0, Elt1),
794                                         ConstantInt::get(I32Ty, 0));
795     } else if (IsX86 && Name.startswith("avx512.mask.pcmpeq.")) {
796       Rep = upgradeMaskedCompare(Builder, *CI, ICmpInst::ICMP_EQ);
797     } else if (IsX86 && Name.startswith("avx512.mask.pcmpgt.")) {
798       Rep = upgradeMaskedCompare(Builder, *CI, ICmpInst::ICMP_SGT);
799     } else if (IsX86 && (Name == "sse41.pmaxsb" ||
800                          Name == "sse2.pmaxs.w" ||
801                          Name == "sse41.pmaxsd" ||
802                          Name.startswith("avx2.pmaxs") ||
803                          Name.startswith("avx512.mask.pmaxs"))) {
804       Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT);
805     } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
806                          Name == "sse41.pmaxuw" ||
807                          Name == "sse41.pmaxud" ||
808                          Name.startswith("avx2.pmaxu") ||
809                          Name.startswith("avx512.mask.pmaxu"))) {
810       Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT);
811     } else if (IsX86 && (Name == "sse41.pminsb" ||
812                          Name == "sse2.pmins.w" ||
813                          Name == "sse41.pminsd" ||
814                          Name.startswith("avx2.pmins") ||
815                          Name.startswith("avx512.mask.pmins"))) {
816       Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT);
817     } else if (IsX86 && (Name == "sse2.pminu.b" ||
818                          Name == "sse41.pminuw" ||
819                          Name == "sse41.pminud" ||
820                          Name.startswith("avx2.pminu") ||
821                          Name.startswith("avx512.mask.pminu"))) {
822       Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT);
823     } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
824                          Name == "sse2.cvtps2pd" ||
825                          Name == "avx.cvtdq2.pd.256" ||
826                          Name == "avx.cvt.ps2.pd.256" ||
827                          Name.startswith("avx512.mask.cvtdq2pd.") ||
828                          Name.startswith("avx512.mask.cvtudq2pd."))) {
829       // Lossless i32/float to double conversion.
830       // Extract the bottom elements if necessary and convert to double vector.
831       Value *Src = CI->getArgOperand(0);
832       VectorType *SrcTy = cast<VectorType>(Src->getType());
833       VectorType *DstTy = cast<VectorType>(CI->getType());
834       Rep = CI->getArgOperand(0);
835 
836       unsigned NumDstElts = DstTy->getNumElements();
837       if (NumDstElts < SrcTy->getNumElements()) {
838         assert(NumDstElts == 2 && "Unexpected vector size");
839         uint32_t ShuffleMask[2] = { 0, 1 };
840         Rep = Builder.CreateShuffleVector(Rep, UndefValue::get(SrcTy),
841                                           ShuffleMask);
842       }
843 
844       bool SInt2Double = (StringRef::npos != Name.find("cvtdq2"));
845       bool UInt2Double = (StringRef::npos != Name.find("cvtudq2"));
846       if (SInt2Double)
847         Rep = Builder.CreateSIToFP(Rep, DstTy, "cvtdq2pd");
848       else if (UInt2Double)
849         Rep = Builder.CreateUIToFP(Rep, DstTy, "cvtudq2pd");
850       else
851         Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");
852 
853       if (CI->getNumArgOperands() == 3)
854         Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
855                             CI->getArgOperand(1));
856     } else if (IsX86 && Name.startswith("sse4a.movnt.")) {
857       Module *M = F->getParent();
858       SmallVector<Metadata *, 1> Elts;
859       Elts.push_back(
860           ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
861       MDNode *Node = MDNode::get(C, Elts);
862 
863       Value *Arg0 = CI->getArgOperand(0);
864       Value *Arg1 = CI->getArgOperand(1);
865 
866       // Nontemporal (unaligned) store of the 0'th element of the float/double
867       // vector.
868       Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
869       PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
870       Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
871       Value *Extract =
872           Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");
873 
874       StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1);
875       SI->setMetadata(M->getMDKindID("nontemporal"), Node);
876 
877       // Remove intrinsic.
878       CI->eraseFromParent();
879       return;
880     } else if (IsX86 && (Name.startswith("avx.movnt.") ||
881                          Name.startswith("avx512.storent."))) {
882       Module *M = F->getParent();
883       SmallVector<Metadata *, 1> Elts;
884       Elts.push_back(
885           ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
886       MDNode *Node = MDNode::get(C, Elts);
887 
888       Value *Arg0 = CI->getArgOperand(0);
889       Value *Arg1 = CI->getArgOperand(1);
890 
891       // Convert the type of the pointer to a pointer to the stored type.
892       Value *BC = Builder.CreateBitCast(Arg0,
893                                         PointerType::getUnqual(Arg1->getType()),
894                                         "cast");
895       VectorType *VTy = cast<VectorType>(Arg1->getType());
896       StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC,
897                                                  VTy->getBitWidth() / 8);
898       SI->setMetadata(M->getMDKindID("nontemporal"), Node);
899 
900       // Remove intrinsic.
901       CI->eraseFromParent();
902       return;
903     } else if (IsX86 && Name == "sse2.storel.dq") {
904       Value *Arg0 = CI->getArgOperand(0);
905       Value *Arg1 = CI->getArgOperand(1);
906 
907       Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
908       Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
909       Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
910       Value *BC = Builder.CreateBitCast(Arg0,
911                                         PointerType::getUnqual(Elt->getType()),
912                                         "cast");
913       Builder.CreateAlignedStore(Elt, BC, 1);
914 
915       // Remove intrinsic.
916       CI->eraseFromParent();
917       return;
918     } else if (IsX86 && (Name.startswith("sse.storeu.") ||
919                          Name.startswith("sse2.storeu.") ||
920                          Name.startswith("avx.storeu."))) {
921       Value *Arg0 = CI->getArgOperand(0);
922       Value *Arg1 = CI->getArgOperand(1);
923 
924       Arg0 = Builder.CreateBitCast(Arg0,
925                                    PointerType::getUnqual(Arg1->getType()),
926                                    "cast");
927       Builder.CreateAlignedStore(Arg1, Arg0, 1);
928 
929       // Remove intrinsic.
930       CI->eraseFromParent();
931       return;
932     } else if (IsX86 && (Name.startswith("avx512.mask.storeu."))) {
933       UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
934                          CI->getArgOperand(2), /*Aligned*/false);
935 
936       // Remove intrinsic.
937       CI->eraseFromParent();
938       return;
939     } else if (IsX86 && (Name.startswith("avx512.mask.store."))) {
940       UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
941                          CI->getArgOperand(2), /*Aligned*/true);
942 
943       // Remove intrinsic.
944       CI->eraseFromParent();
945       return;
946     } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) {
947       Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
948                               CI->getArgOperand(1), CI->getArgOperand(2),
949                               /*Aligned*/false);
950     } else if (IsX86 && (Name.startswith("avx512.mask.load."))) {
951       Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
952                               CI->getArgOperand(1),CI->getArgOperand(2),
953                               /*Aligned*/true);
954     } else if (IsX86 && Name.startswith("xop.vpcom")) {
955       Intrinsic::ID intID;
956       if (Name.endswith("ub"))
957         intID = Intrinsic::x86_xop_vpcomub;
958       else if (Name.endswith("uw"))
959         intID = Intrinsic::x86_xop_vpcomuw;
960       else if (Name.endswith("ud"))
961         intID = Intrinsic::x86_xop_vpcomud;
962       else if (Name.endswith("uq"))
963         intID = Intrinsic::x86_xop_vpcomuq;
964       else if (Name.endswith("b"))
965         intID = Intrinsic::x86_xop_vpcomb;
966       else if (Name.endswith("w"))
967         intID = Intrinsic::x86_xop_vpcomw;
968       else if (Name.endswith("d"))
969         intID = Intrinsic::x86_xop_vpcomd;
970       else if (Name.endswith("q"))
971         intID = Intrinsic::x86_xop_vpcomq;
972       else
973         llvm_unreachable("Unknown suffix");
974 
975       Name = Name.substr(9); // strip off "xop.vpcom"
976       unsigned Imm;
977       if (Name.startswith("lt"))
978         Imm = 0;
979       else if (Name.startswith("le"))
980         Imm = 1;
981       else if (Name.startswith("gt"))
982         Imm = 2;
983       else if (Name.startswith("ge"))
984         Imm = 3;
985       else if (Name.startswith("eq"))
986         Imm = 4;
987       else if (Name.startswith("ne"))
988         Imm = 5;
989       else if (Name.startswith("false"))
990         Imm = 6;
991       else if (Name.startswith("true"))
992         Imm = 7;
993       else
994         llvm_unreachable("Unknown condition");
995 
996       Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID);
997       Rep =
998           Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1),
999                                      Builder.getInt8(Imm)});
1000     } else if (IsX86 && Name == "xop.vpcmov") {
1001       Value *Arg0 = CI->getArgOperand(0);
1002       Value *Arg1 = CI->getArgOperand(1);
1003       Value *Sel = CI->getArgOperand(2);
1004       unsigned NumElts = CI->getType()->getVectorNumElements();
1005       Constant *MinusOne = ConstantVector::getSplat(NumElts, Builder.getInt64(-1));
1006       Value *NotSel = Builder.CreateXor(Sel, MinusOne);
1007       Value *Sel0 = Builder.CreateAnd(Arg0, Sel);
1008       Value *Sel1 = Builder.CreateAnd(Arg1, NotSel);
1009       Rep = Builder.CreateOr(Sel0, Sel1);
1010     } else if (IsX86 && Name == "sse42.crc32.64.8") {
1011       Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
1012                                                Intrinsic::x86_sse42_crc32_32_8);
1013       Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
1014       Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
1015       Rep = Builder.CreateZExt(Rep, CI->getType(), "");
1016     } else if (IsX86 && Name.startswith("avx.vbroadcast.s")) {
1017       // Replace broadcasts with a series of insertelements.
1018       Type *VecTy = CI->getType();
1019       Type *EltTy = VecTy->getVectorElementType();
1020       unsigned EltNum = VecTy->getVectorNumElements();
1021       Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
1022                                           EltTy->getPointerTo());
1023       Value *Load = Builder.CreateLoad(EltTy, Cast);
1024       Type *I32Ty = Type::getInt32Ty(C);
1025       Rep = UndefValue::get(VecTy);
1026       for (unsigned I = 0; I < EltNum; ++I)
1027         Rep = Builder.CreateInsertElement(Rep, Load,
1028                                           ConstantInt::get(I32Ty, I));
1029     } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
1030                          Name.startswith("sse41.pmovzx") ||
1031                          Name.startswith("avx2.pmovsx") ||
1032                          Name.startswith("avx2.pmovzx") ||
1033                          Name.startswith("avx512.mask.pmovsx") ||
1034                          Name.startswith("avx512.mask.pmovzx"))) {
1035       VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
1036       VectorType *DstTy = cast<VectorType>(CI->getType());
1037       unsigned NumDstElts = DstTy->getNumElements();
1038 
1039       // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
1040       SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
1041       for (unsigned i = 0; i != NumDstElts; ++i)
1042         ShuffleMask[i] = i;
1043 
1044       Value *SV = Builder.CreateShuffleVector(
1045           CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask);
1046 
1047       bool DoSext = (StringRef::npos != Name.find("pmovsx"));
1048       Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
1049                    : Builder.CreateZExt(SV, DstTy);
1050       // If there are 3 arguments, it's a masked intrinsic so we need a select.
1051       if (CI->getNumArgOperands() == 3)
1052         Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1053                             CI->getArgOperand(1));
1054     } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
1055                          Name == "avx2.vbroadcasti128")) {
1056       // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
1057       Type *EltTy = CI->getType()->getVectorElementType();
1058       unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
1059       Type *VT = VectorType::get(EltTy, NumSrcElts);
1060       Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
1061                                             PointerType::getUnqual(VT));
1062       Value *Load = Builder.CreateAlignedLoad(Op, 1);
1063       if (NumSrcElts == 2)
1064         Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
1065                                           { 0, 1, 0, 1 });
1066       else
1067         Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
1068                                           { 0, 1, 2, 3, 0, 1, 2, 3 });
1069     } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
1070                          Name.startswith("avx2.vbroadcast") ||
1071                          Name.startswith("avx512.pbroadcast") ||
1072                          Name.startswith("avx512.mask.broadcast.s"))) {
1073       // Replace vp?broadcasts with a vector shuffle.
1074       Value *Op = CI->getArgOperand(0);
1075       unsigned NumElts = CI->getType()->getVectorNumElements();
1076       Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
1077       Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
1078                                         Constant::getNullValue(MaskTy));
1079 
1080       if (CI->getNumArgOperands() == 3)
1081         Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1082                             CI->getArgOperand(1));
1083     } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
1084       Rep = UpgradeX86PALIGNRIntrinsics(Builder, CI->getArgOperand(0),
1085                                         CI->getArgOperand(1),
1086                                         CI->getArgOperand(2),
1087                                         CI->getArgOperand(3),
1088                                         CI->getArgOperand(4));
1089     } else if (IsX86 && (Name == "sse2.psll.dq" ||
1090                          Name == "avx2.psll.dq")) {
1091       // 128/256-bit shift left specified in bits.
1092       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1093       Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
1094                                        Shift / 8); // Shift is in bits.
1095     } else if (IsX86 && (Name == "sse2.psrl.dq" ||
1096                          Name == "avx2.psrl.dq")) {
1097       // 128/256-bit shift right specified in bits.
1098       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1099       Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
1100                                        Shift / 8); // Shift is in bits.
1101     } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
1102                          Name == "avx2.psll.dq.bs" ||
1103                          Name == "avx512.psll.dq.512")) {
1104       // 128/256/512-bit shift left specified in bytes.
1105       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1106       Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
1107     } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
1108                          Name == "avx2.psrl.dq.bs" ||
1109                          Name == "avx512.psrl.dq.512")) {
1110       // 128/256/512-bit shift right specified in bytes.
1111       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1112       Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
1113     } else if (IsX86 && (Name == "sse41.pblendw" ||
1114                          Name.startswith("sse41.blendp") ||
1115                          Name.startswith("avx.blend.p") ||
1116                          Name == "avx2.pblendw" ||
1117                          Name.startswith("avx2.pblendd."))) {
1118       Value *Op0 = CI->getArgOperand(0);
1119       Value *Op1 = CI->getArgOperand(1);
1120       unsigned Imm = cast <ConstantInt>(CI->getArgOperand(2))->getZExtValue();
1121       VectorType *VecTy = cast<VectorType>(CI->getType());
1122       unsigned NumElts = VecTy->getNumElements();
1123 
1124       SmallVector<uint32_t, 16> Idxs(NumElts);
1125       for (unsigned i = 0; i != NumElts; ++i)
1126         Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
1127 
1128       Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
1129     } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
1130                          Name == "avx2.vinserti128")) {
1131       Value *Op0 = CI->getArgOperand(0);
1132       Value *Op1 = CI->getArgOperand(1);
1133       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
1134       VectorType *VecTy = cast<VectorType>(CI->getType());
1135       unsigned NumElts = VecTy->getNumElements();
1136 
1137       // Mask off the high bits of the immediate value; hardware ignores those.
1138       Imm = Imm & 1;
1139 
1140       // Extend the second operand into a vector that is twice as big.
1141       Value *UndefV = UndefValue::get(Op1->getType());
1142       SmallVector<uint32_t, 8> Idxs(NumElts);
1143       for (unsigned i = 0; i != NumElts; ++i)
1144         Idxs[i] = i;
1145       Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);
1146 
1147       // Insert the second operand into the first operand.
1148 
1149       // Note that there is no guarantee that instruction lowering will actually
1150       // produce a vinsertf128 instruction for the created shuffles. In
1151       // particular, the 0 immediate case involves no lane changes, so it can
1152       // be handled as a blend.
1153 
1154       // Example of shuffle mask for 32-bit elements:
1155       // Imm = 1  <i32 0, i32 1, i32 2,  i32 3,  i32 8, i32 9, i32 10, i32 11>
1156       // Imm = 0  <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6,  i32 7 >
1157 
1158       // The low half of the result is either the low half of the 1st operand
1159       // or the low half of the 2nd operand (the inserted vector).
1160       for (unsigned i = 0; i != NumElts / 2; ++i)
1161         Idxs[i] = Imm ? i : (i + NumElts);
1162       // The high half of the result is either the low half of the 2nd operand
1163       // (the inserted vector) or the high half of the 1st operand.
1164       for (unsigned i = NumElts / 2; i != NumElts; ++i)
1165         Idxs[i] = Imm ? (i + NumElts / 2) : i;
1166       Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
1167     } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
1168                          Name == "avx2.vextracti128")) {
1169       Value *Op0 = CI->getArgOperand(0);
1170       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1171       VectorType *VecTy = cast<VectorType>(CI->getType());
1172       unsigned NumElts = VecTy->getNumElements();
1173 
1174       // Mask off the high bits of the immediate value; hardware ignores those.
1175       Imm = Imm & 1;
1176 
1177       // Get indexes for either the high half or low half of the input vector.
1178       SmallVector<uint32_t, 4> Idxs(NumElts);
1179       for (unsigned i = 0; i != NumElts; ++i) {
1180         Idxs[i] = Imm ? (i + NumElts) : i;
1181       }
1182 
1183       Value *UndefV = UndefValue::get(Op0->getType());
1184       Rep = Builder.CreateShuffleVector(Op0, UndefV, Idxs);
1185     } else if (!IsX86 && Name == "stackprotectorcheck") {
1186       Rep = nullptr;
1187     } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
1188                          Name.startswith("avx512.mask.perm.di."))) {
1189       Value *Op0 = CI->getArgOperand(0);
1190       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1191       VectorType *VecTy = cast<VectorType>(CI->getType());
1192       unsigned NumElts = VecTy->getNumElements();
1193 
1194       SmallVector<uint32_t, 8> Idxs(NumElts);
1195       for (unsigned i = 0; i != NumElts; ++i)
1196         Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);
1197 
1198       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
1199 
1200       if (CI->getNumArgOperands() == 4)
1201         Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1202                             CI->getArgOperand(2));
1203     } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
1204                          Name == "sse2.pshuf.d" ||
1205                          Name.startswith("avx512.mask.vpermil.p") ||
1206                          Name.startswith("avx512.mask.pshuf.d."))) {
1207       Value *Op0 = CI->getArgOperand(0);
1208       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1209       VectorType *VecTy = cast<VectorType>(CI->getType());
1210       unsigned NumElts = VecTy->getNumElements();
1211       // Calculate the size of each index in the immediate.
1212       unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
1213       unsigned IdxMask = ((1 << IdxSize) - 1);
1214 
1215       SmallVector<uint32_t, 8> Idxs(NumElts);
1216       // Lookup the bits for this element, wrapping around the immediate every
1217       // 8-bits. Elements are grouped into sets of 2 or 4 elements so we need
1218       // to offset by the first index of each group.
1219       for (unsigned i = 0; i != NumElts; ++i)
1220         Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
1221 
1222       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
1223 
1224       if (CI->getNumArgOperands() == 4)
1225         Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1226                             CI->getArgOperand(2));
1227     } else if (IsX86 && (Name == "sse2.pshufl.w" ||
1228                          Name.startswith("avx512.mask.pshufl.w."))) {
1229       Value *Op0 = CI->getArgOperand(0);
1230       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1231       unsigned NumElts = CI->getType()->getVectorNumElements();
1232 
1233       SmallVector<uint32_t, 16> Idxs(NumElts);
1234       for (unsigned l = 0; l != NumElts; l += 8) {
1235         for (unsigned i = 0; i != 4; ++i)
1236           Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
1237         for (unsigned i = 4; i != 8; ++i)
1238           Idxs[i + l] = i + l;
1239       }
1240 
1241       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
1242 
1243       if (CI->getNumArgOperands() == 4)
1244         Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1245                             CI->getArgOperand(2));
1246     } else if (IsX86 && (Name == "sse2.pshufh.w" ||
1247                          Name.startswith("avx512.mask.pshufh.w."))) {
1248       Value *Op0 = CI->getArgOperand(0);
1249       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
1250       unsigned NumElts = CI->getType()->getVectorNumElements();
1251 
1252       SmallVector<uint32_t, 16> Idxs(NumElts);
1253       for (unsigned l = 0; l != NumElts; l += 8) {
1254         for (unsigned i = 0; i != 4; ++i)
1255           Idxs[i + l] = i + l;
1256         for (unsigned i = 0; i != 4; ++i)
1257           Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
1258       }
1259 
1260       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
1261 
1262       if (CI->getNumArgOperands() == 4)
1263         Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1264                             CI->getArgOperand(2));
1265     } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
1266       Value *Op0 = CI->getArgOperand(0);
1267       Value *Op1 = CI->getArgOperand(1);
1268       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
1269       unsigned NumElts = CI->getType()->getVectorNumElements();
1270 
1271       unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
1272       unsigned HalfLaneElts = NumLaneElts / 2;
1273 
1274       SmallVector<uint32_t, 16> Idxs(NumElts);
1275       for (unsigned i = 0; i != NumElts; ++i) {
1276         // Base index is the starting element of the lane.
1277         Idxs[i] = i - (i % NumLaneElts);
1278         // If we are half way through the lane switch to the other source.
1279         if ((i % NumLaneElts) >= HalfLaneElts)
1280           Idxs[i] += NumElts;
1281         // Now select the specific element. By adding HalfLaneElts bits from
1282         // the immediate. Wrapping around the immediate every 8-bits.
1283         Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
1284       }
1285 
1286       Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
1287 
1288       Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
1289                           CI->getArgOperand(3));
1290     } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
1291                          Name.startswith("avx512.mask.movshdup") ||
1292                          Name.startswith("avx512.mask.movsldup"))) {
1293       Value *Op0 = CI->getArgOperand(0);
1294       unsigned NumElts = CI->getType()->getVectorNumElements();
1295       unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
1296 
1297       unsigned Offset = 0;
1298       if (Name.startswith("avx512.mask.movshdup."))
1299         Offset = 1;
1300 
1301       SmallVector<uint32_t, 16> Idxs(NumElts);
1302       for (unsigned l = 0; l != NumElts; l += NumLaneElts)
1303         for (unsigned i = 0; i != NumLaneElts; i += 2) {
1304           Idxs[i + l + 0] = i + l + Offset;
1305           Idxs[i + l + 1] = i + l + Offset;
1306         }
1307 
1308       Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
1309 
1310       Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1311                           CI->getArgOperand(1));
1312     } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
1313                          Name.startswith("avx512.mask.unpckl."))) {
1314       Value *Op0 = CI->getArgOperand(0);
1315       Value *Op1 = CI->getArgOperand(1);
1316       int NumElts = CI->getType()->getVectorNumElements();
1317       int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
1318 
1319       SmallVector<uint32_t, 64> Idxs(NumElts);
1320       for (int l = 0; l != NumElts; l += NumLaneElts)
1321         for (int i = 0; i != NumLaneElts; ++i)
1322           Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);
1323 
1324       Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
1325 
1326       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1327                           CI->getArgOperand(2));
1328     } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
1329                          Name.startswith("avx512.mask.unpckh."))) {
1330       Value *Op0 = CI->getArgOperand(0);
1331       Value *Op1 = CI->getArgOperand(1);
1332       int NumElts = CI->getType()->getVectorNumElements();
1333       int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
1334 
1335       SmallVector<uint32_t, 64> Idxs(NumElts);
1336       for (int l = 0; l != NumElts; l += NumLaneElts)
1337         for (int i = 0; i != NumLaneElts; ++i)
1338           Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);
1339 
1340       Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
1341 
1342       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1343                           CI->getArgOperand(2));
1344     } else if (IsX86 && Name.startswith("avx512.mask.pand.")) {
1345       Rep = Builder.CreateAnd(CI->getArgOperand(0), CI->getArgOperand(1));
1346       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1347                           CI->getArgOperand(2));
1348     } else if (IsX86 && Name.startswith("avx512.mask.pandn.")) {
1349       Rep = Builder.CreateAnd(Builder.CreateNot(CI->getArgOperand(0)),
1350                               CI->getArgOperand(1));
1351       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1352                           CI->getArgOperand(2));
1353     } else if (IsX86 && Name.startswith("avx512.mask.por.")) {
1354       Rep = Builder.CreateOr(CI->getArgOperand(0), CI->getArgOperand(1));
1355       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1356                           CI->getArgOperand(2));
1357     } else if (IsX86 && Name.startswith("avx512.mask.pxor.")) {
1358       Rep = Builder.CreateXor(CI->getArgOperand(0), CI->getArgOperand(1));
1359       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1360                           CI->getArgOperand(2));
1361     } else if (IsX86 && Name.startswith("avx512.mask.and.")) {
1362       VectorType *FTy = cast<VectorType>(CI->getType());
1363       VectorType *ITy = VectorType::getInteger(FTy);
1364       Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
1365                               Builder.CreateBitCast(CI->getArgOperand(1), ITy));
1366       Rep = Builder.CreateBitCast(Rep, FTy);
1367       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1368                           CI->getArgOperand(2));
1369     } else if (IsX86 && Name.startswith("avx512.mask.andn.")) {
1370       VectorType *FTy = cast<VectorType>(CI->getType());
1371       VectorType *ITy = VectorType::getInteger(FTy);
1372       Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
1373       Rep = Builder.CreateAnd(Rep,
1374                               Builder.CreateBitCast(CI->getArgOperand(1), ITy));
1375       Rep = Builder.CreateBitCast(Rep, FTy);
1376       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1377                           CI->getArgOperand(2));
1378     } else if (IsX86 && Name.startswith("avx512.mask.or.")) {
1379       VectorType *FTy = cast<VectorType>(CI->getType());
1380       VectorType *ITy = VectorType::getInteger(FTy);
1381       Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
1382                              Builder.CreateBitCast(CI->getArgOperand(1), ITy));
1383       Rep = Builder.CreateBitCast(Rep, FTy);
1384       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1385                           CI->getArgOperand(2));
1386     } else if (IsX86 && Name.startswith("avx512.mask.xor.")) {
1387       VectorType *FTy = cast<VectorType>(CI->getType());
1388       VectorType *ITy = VectorType::getInteger(FTy);
1389       Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
1390                               Builder.CreateBitCast(CI->getArgOperand(1), ITy));
1391       Rep = Builder.CreateBitCast(Rep, FTy);
1392       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1393                           CI->getArgOperand(2));
1394     } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
1395       Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
1396       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1397                           CI->getArgOperand(2));
1398     } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
1399       Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
1400       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1401                           CI->getArgOperand(2));
1402     } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
1403       Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
1404       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1405                           CI->getArgOperand(2));
1406     } else if (IsX86 && (Name.startswith("avx512.mask.add.p"))) {
1407       Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
1408       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1409                           CI->getArgOperand(2));
1410     } else if (IsX86 && Name.startswith("avx512.mask.div.p")) {
1411       Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
1412       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1413                           CI->getArgOperand(2));
1414     } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) {
1415       Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
1416       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1417                           CI->getArgOperand(2));
1418     } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) {
1419       Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
1420       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1421                           CI->getArgOperand(2));
1422     } else if (IsX86 && Name.startswith("avx512.mask.pshuf.b.")) {
1423       VectorType *VecTy = cast<VectorType>(CI->getType());
1424       Intrinsic::ID IID;
1425       if (VecTy->getPrimitiveSizeInBits() == 128)
1426         IID = Intrinsic::x86_ssse3_pshuf_b_128;
1427       else if (VecTy->getPrimitiveSizeInBits() == 256)
1428         IID = Intrinsic::x86_avx2_pshuf_b;
1429       else
1430         llvm_unreachable("Unexpected intrinsic");
1431 
1432       Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
1433                                { CI->getArgOperand(0), CI->getArgOperand(1) });
1434       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
1435                           CI->getArgOperand(2));
1436     } else if (IsX86 && Name.startswith("avx512.mask.psll")) {
1437       bool IsImmediate = Name[16] == 'i' ||
1438                          (Name.size() > 18 && Name[18] == 'i');
1439       bool IsVariable = Name[16] == 'v';
1440       char Size = Name[16] == '.' ? Name[17] :
1441                   Name[17] == '.' ? Name[18] :
1442                                     Name[19];
1443 
1444       Intrinsic::ID IID;
1445       if (IsVariable && Name[17] != '.') {
1446         if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
1447           IID = Intrinsic::x86_avx2_psllv_q;
1448         else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
1449           IID = Intrinsic::x86_avx2_psllv_q_256;
1450         else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
1451           IID = Intrinsic::x86_avx2_psllv_d;
1452         else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
1453           IID = Intrinsic::x86_avx2_psllv_d_256;
1454         else
1455           llvm_unreachable("Unexpected size");
1456       } else if (Name.endswith(".128")) {
1457         if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
1458           IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
1459                             : Intrinsic::x86_sse2_psll_d;
1460         else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
1461           IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
1462                             : Intrinsic::x86_sse2_psll_q;
1463         else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
1464           IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
1465                             : Intrinsic::x86_sse2_psll_w;
1466         else
1467           llvm_unreachable("Unexpected size");
1468       } else if (Name.endswith(".256")) {
1469         if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
1470           IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
1471                             : Intrinsic::x86_avx2_psll_d;
1472         else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
1473           IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
1474                             : Intrinsic::x86_avx2_psll_q;
1475         else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
1476           IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
1477                             : Intrinsic::x86_avx2_psll_w;
1478         else
1479           llvm_unreachable("Unexpected size");
1480       } else {
1481         if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
1482           IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
1483                 IsVariable  ? Intrinsic::x86_avx512_psllv_d_512 :
1484                               Intrinsic::x86_avx512_psll_d_512;
1485         else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
1486           IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
1487                 IsVariable  ? Intrinsic::x86_avx512_psllv_q_512 :
1488                               Intrinsic::x86_avx512_psll_q_512;
1489         else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
1490           IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
1491                             : Intrinsic::x86_avx512_psll_w_512;
1492         else
1493           llvm_unreachable("Unexpected size");
1494       }
1495 
1496       Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
1497     } else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
1498       bool IsImmediate = Name[16] == 'i' ||
1499                          (Name.size() > 18 && Name[18] == 'i');
1500       bool IsVariable = Name[16] == 'v';
1501       char Size = Name[16] == '.' ? Name[17] :
1502                   Name[17] == '.' ? Name[18] :
1503                                     Name[19];
1504 
1505       Intrinsic::ID IID;
1506       if (IsVariable && Name[17] != '.') {
1507         if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
1508           IID = Intrinsic::x86_avx2_psrlv_q;
1509         else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
1510           IID = Intrinsic::x86_avx2_psrlv_q_256;
1511         else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
1512           IID = Intrinsic::x86_avx2_psrlv_d;
1513         else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
1514           IID = Intrinsic::x86_avx2_psrlv_d_256;
1515         else
1516           llvm_unreachable("Unexpected size");
1517       } else if (Name.endswith(".128")) {
1518         if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
1519           IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
1520                             : Intrinsic::x86_sse2_psrl_d;
1521         else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
1522           IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
1523                             : Intrinsic::x86_sse2_psrl_q;
1524         else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
1525           IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
1526                             : Intrinsic::x86_sse2_psrl_w;
1527         else
1528           llvm_unreachable("Unexpected size");
1529       } else if (Name.endswith(".256")) {
1530         if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
1531           IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
1532                             : Intrinsic::x86_avx2_psrl_d;
1533         else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
1534           IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
1535                             : Intrinsic::x86_avx2_psrl_q;
1536         else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
1537           IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
1538                             : Intrinsic::x86_avx2_psrl_w;
1539         else
1540           llvm_unreachable("Unexpected size");
1541       } else {
1542         if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
1543           IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
1544                 IsVariable  ? Intrinsic::x86_avx512_psrlv_d_512 :
1545                               Intrinsic::x86_avx512_psrl_d_512;
1546         else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
1547           IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
1548                 IsVariable  ? Intrinsic::x86_avx512_psrlv_q_512 :
1549                               Intrinsic::x86_avx512_psrl_q_512;
1550         else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w)
1551           IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
1552                             : Intrinsic::x86_avx512_psrl_w_512;
1553         else
1554           llvm_unreachable("Unexpected size");
1555       }
1556 
1557       Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
1558     } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
1559       bool IsImmediate = Name[16] == 'i' ||
1560                          (Name.size() > 18 && Name[18] == 'i');
1561       bool IsVariable = Name[16] == 'v';
1562       char Size = Name[16] == '.' ? Name[17] :
1563                   Name[17] == '.' ? Name[18] :
1564                                     Name[19];
1565 
1566       Intrinsic::ID IID;
1567       if (IsVariable && Name[17] != '.') {
1568         if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
1569           IID = Intrinsic::x86_avx2_psrav_d;
1570         else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
1571           IID = Intrinsic::x86_avx2_psrav_d_256;
1572         else
1573           llvm_unreachable("Unexpected size");
1574       } else if (Name.endswith(".128")) {
1575         if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
1576           IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
1577                             : Intrinsic::x86_sse2_psra_d;
1578         else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
1579           IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
1580                 IsVariable  ? Intrinsic::x86_avx512_psrav_q_128 :
1581                               Intrinsic::x86_avx512_psra_q_128;
1582         else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
1583           IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
1584                             : Intrinsic::x86_sse2_psra_w;
1585         else
1586           llvm_unreachable("Unexpected size");
1587       } else if (Name.endswith(".256")) {
1588         if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
1589           IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
1590                             : Intrinsic::x86_avx2_psra_d;
1591         else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
1592           IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
1593                 IsVariable  ? Intrinsic::x86_avx512_psrav_q_256 :
1594                               Intrinsic::x86_avx512_psra_q_256;
1595         else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
1596           IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
1597                             : Intrinsic::x86_avx2_psra_w;
1598         else
1599           llvm_unreachable("Unexpected size");
1600       } else {
1601         if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
1602           IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
1603                 IsVariable  ? Intrinsic::x86_avx512_psrav_d_512 :
1604                               Intrinsic::x86_avx512_psra_d_512;
1605         else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
1606           IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
1607                 IsVariable  ? Intrinsic::x86_avx512_psrav_q_512 :
1608                               Intrinsic::x86_avx512_psra_q_512;
1609         else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
1610           IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
1611                             : Intrinsic::x86_avx512_psra_w_512;
1612         else
1613           llvm_unreachable("Unexpected size");
1614       }
1615 
1616       Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
1617     } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
1618       Rep = upgradeMaskedMove(Builder, *CI);
1619     } else {
1620       llvm_unreachable("Unknown function for CallInst upgrade.");
1621     }
1622 
1623     if (Rep)
1624       CI->replaceAllUsesWith(Rep);
1625     CI->eraseFromParent();
1626     return;
1627   }
1628 
1629   std::string Name = CI->getName();
1630   if (!Name.empty())
1631     CI->setName(Name + ".old");
1632 
1633   switch (NewFn->getIntrinsicID()) {
1634   default:
1635     llvm_unreachable("Unknown function for CallInst upgrade.");
1636 
1637   case Intrinsic::arm_neon_vld1:
1638   case Intrinsic::arm_neon_vld2:
1639   case Intrinsic::arm_neon_vld3:
1640   case Intrinsic::arm_neon_vld4:
1641   case Intrinsic::arm_neon_vld2lane:
1642   case Intrinsic::arm_neon_vld3lane:
1643   case Intrinsic::arm_neon_vld4lane:
1644   case Intrinsic::arm_neon_vst1:
1645   case Intrinsic::arm_neon_vst2:
1646   case Intrinsic::arm_neon_vst3:
1647   case Intrinsic::arm_neon_vst4:
1648   case Intrinsic::arm_neon_vst2lane:
1649   case Intrinsic::arm_neon_vst3lane:
1650   case Intrinsic::arm_neon_vst4lane: {
1651     SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
1652                                  CI->arg_operands().end());
1653     CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
1654     CI->eraseFromParent();
1655     return;
1656   }
1657 
1658   case Intrinsic::ctlz:
1659   case Intrinsic::cttz:
1660     assert(CI->getNumArgOperands() == 1 &&
1661            "Mismatch between function args and call args");
1662     CI->replaceAllUsesWith(Builder.CreateCall(
1663         NewFn, {CI->getArgOperand(0), Builder.getFalse()}, Name));
1664     CI->eraseFromParent();
1665     return;
1666 
1667   case Intrinsic::objectsize:
1668     CI->replaceAllUsesWith(Builder.CreateCall(
1669         NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)}, Name));
1670     CI->eraseFromParent();
1671     return;
1672 
1673   case Intrinsic::ctpop: {
1674     CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {CI->getArgOperand(0)}));
1675     CI->eraseFromParent();
1676     return;
1677   }
1678 
1679   case Intrinsic::x86_xop_vfrcz_ss:
1680   case Intrinsic::x86_xop_vfrcz_sd:
1681     CI->replaceAllUsesWith(
1682         Builder.CreateCall(NewFn, {CI->getArgOperand(1)}, Name));
1683     CI->eraseFromParent();
1684     return;
1685 
1686   case Intrinsic::x86_xop_vpermil2pd:
1687   case Intrinsic::x86_xop_vpermil2ps:
1688   case Intrinsic::x86_xop_vpermil2pd_256:
1689   case Intrinsic::x86_xop_vpermil2ps_256: {
1690     SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
1691                                  CI->arg_operands().end());
1692     VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
1693     VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
1694     Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
1695     CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args, Name));
1696     CI->eraseFromParent();
1697     return;
1698   }
1699 
1700   case Intrinsic::x86_sse41_ptestc:
1701   case Intrinsic::x86_sse41_ptestz:
1702   case Intrinsic::x86_sse41_ptestnzc: {
1703     // The arguments for these intrinsics used to be v4f32, and changed
1704     // to v2i64. This is purely a nop, since those are bitwise intrinsics.
1705     // So, the only thing required is a bitcast for both arguments.
1706     // First, check the arguments have the old type.
1707     Value *Arg0 = CI->getArgOperand(0);
1708     if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
1709       return;
1710 
1711     // Old intrinsic, add bitcasts
1712     Value *Arg1 = CI->getArgOperand(1);
1713 
1714     Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
1715 
1716     Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
1717     Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
1718 
1719     CallInst *NewCall = Builder.CreateCall(NewFn, {BC0, BC1}, Name);
1720     CI->replaceAllUsesWith(NewCall);
1721     CI->eraseFromParent();
1722     return;
1723   }
1724 
1725   case Intrinsic::x86_sse41_insertps:
1726   case Intrinsic::x86_sse41_dppd:
1727   case Intrinsic::x86_sse41_dpps:
1728   case Intrinsic::x86_sse41_mpsadbw:
1729   case Intrinsic::x86_avx_dp_ps_256:
1730   case Intrinsic::x86_avx2_mpsadbw: {
1731     // Need to truncate the last argument from i32 to i8 -- this argument models
1732     // an inherently 8-bit immediate operand to these x86 instructions.
1733     SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
1734                                  CI->arg_operands().end());
1735 
1736     // Replace the last argument with a trunc.
1737     Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
1738 
1739     CallInst *NewCall = Builder.CreateCall(NewFn, Args);
1740     CI->replaceAllUsesWith(NewCall);
1741     CI->eraseFromParent();
1742     return;
1743   }
1744 
1745   case Intrinsic::thread_pointer: {
1746     CI->replaceAllUsesWith(Builder.CreateCall(NewFn, {}));
1747     CI->eraseFromParent();
1748     return;
1749   }
1750 
1751   case Intrinsic::invariant_start:
1752   case Intrinsic::invariant_end:
1753   case Intrinsic::masked_load:
1754   case Intrinsic::masked_store: {
1755     SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
1756                                  CI->arg_operands().end());
1757     CI->replaceAllUsesWith(Builder.CreateCall(NewFn, Args));
1758     CI->eraseFromParent();
1759     return;
1760   }
1761   }
1762 }
1763 
1764 void llvm::UpgradeCallsToIntrinsic(Function *F) {
1765   assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
1766 
1767   // Check if this function should be upgraded and get the replacement function
1768   // if there is one.
1769   Function *NewFn;
1770   if (UpgradeIntrinsicFunction(F, NewFn)) {
1771     // Replace all users of the old function with the new function or new
1772     // instructions. This is not a range loop because the call is deleted.
1773     for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; )
1774       if (CallInst *CI = dyn_cast<CallInst>(*UI++))
1775         UpgradeIntrinsicCall(CI, NewFn);
1776 
1777     // Remove old function, no longer used, from the module.
1778     F->eraseFromParent();
1779   }
1780 }
1781 
1782 MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
1783   // Check if the tag uses struct-path aware TBAA format.
1784   if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
1785     return &MD;
1786 
1787   auto &Context = MD.getContext();
1788   if (MD.getNumOperands() == 3) {
1789     Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
1790     MDNode *ScalarType = MDNode::get(Context, Elts);
1791     // Create a MDNode <ScalarType, ScalarType, offset 0, const>
1792     Metadata *Elts2[] = {ScalarType, ScalarType,
1793                          ConstantAsMetadata::get(
1794                              Constant::getNullValue(Type::getInt64Ty(Context))),
1795                          MD.getOperand(2)};
1796     return MDNode::get(Context, Elts2);
1797   }
1798   // Create a MDNode <MD, MD, offset 0>
1799   Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
1800                                     Type::getInt64Ty(Context)))};
1801   return MDNode::get(Context, Elts);
1802 }
1803 
1804 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
1805                                       Instruction *&Temp) {
1806   if (Opc != Instruction::BitCast)
1807     return nullptr;
1808 
1809   Temp = nullptr;
1810   Type *SrcTy = V->getType();
1811   if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
1812       SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
1813     LLVMContext &Context = V->getContext();
1814 
1815     // We have no information about target data layout, so we assume that
1816     // the maximum pointer size is 64bit.
1817     Type *MidTy = Type::getInt64Ty(Context);
1818     Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);
1819 
1820     return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
1821   }
1822 
1823   return nullptr;
1824 }
1825 
1826 Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
1827   if (Opc != Instruction::BitCast)
1828     return nullptr;
1829 
1830   Type *SrcTy = C->getType();
1831   if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
1832       SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
1833     LLVMContext &Context = C->getContext();
1834 
1835     // We have no information about target data layout, so we assume that
1836     // the maximum pointer size is 64bit.
1837     Type *MidTy = Type::getInt64Ty(Context);
1838 
1839     return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
1840                                      DestTy);
1841   }
1842 
1843   return nullptr;
1844 }
1845 
1846 /// Check the debug info version number, if it is out-dated, drop the debug
1847 /// info. Return true if module is modified.
1848 bool llvm::UpgradeDebugInfo(Module &M) {
1849   unsigned Version = getDebugMetadataVersionFromModule(M);
1850   if (Version == DEBUG_METADATA_VERSION)
1851     return false;
1852 
1853   bool RetCode = StripDebugInfo(M);
1854   if (RetCode) {
1855     DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
1856     M.getContext().diagnose(DiagVersion);
1857   }
1858   return RetCode;
1859 }
1860 
1861 bool llvm::UpgradeModuleFlags(Module &M) {
1862   const NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
1863   if (!ModFlags)
1864     return false;
1865 
1866   bool HasObjCFlag = false, HasClassProperties = false;
1867   for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
1868     MDNode *Op = ModFlags->getOperand(I);
1869     if (Op->getNumOperands() < 2)
1870       continue;
1871     MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1872     if (!ID)
1873       continue;
1874     if (ID->getString() == "Objective-C Image Info Version")
1875       HasObjCFlag = true;
1876     if (ID->getString() == "Objective-C Class Properties")
1877       HasClassProperties = true;
1878   }
1879   // "Objective-C Class Properties" is recently added for Objective-C. We
1880   // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
1881   // flag of value 0, so we can correclty downgrade this flag when trying to
1882   // link an ObjC bitcode without this module flag with an ObjC bitcode with
1883   // this module flag.
1884   if (HasObjCFlag && !HasClassProperties) {
1885     M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
1886                     (uint32_t)0);
1887     return true;
1888   }
1889   return false;
1890 }
1891 
1892 static bool isOldLoopArgument(Metadata *MD) {
1893   auto *T = dyn_cast_or_null<MDTuple>(MD);
1894   if (!T)
1895     return false;
1896   if (T->getNumOperands() < 1)
1897     return false;
1898   auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
1899   if (!S)
1900     return false;
1901   return S->getString().startswith("llvm.vectorizer.");
1902 }
1903 
1904 static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
1905   StringRef OldPrefix = "llvm.vectorizer.";
1906   assert(OldTag.startswith(OldPrefix) && "Expected old prefix");
1907 
1908   if (OldTag == "llvm.vectorizer.unroll")
1909     return MDString::get(C, "llvm.loop.interleave.count");
1910 
1911   return MDString::get(
1912       C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
1913              .str());
1914 }
1915 
1916 static Metadata *upgradeLoopArgument(Metadata *MD) {
1917   auto *T = dyn_cast_or_null<MDTuple>(MD);
1918   if (!T)
1919     return MD;
1920   if (T->getNumOperands() < 1)
1921     return MD;
1922   auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
1923   if (!OldTag)
1924     return MD;
1925   if (!OldTag->getString().startswith("llvm.vectorizer."))
1926     return MD;
1927 
1928   // This has an old tag.  Upgrade it.
1929   SmallVector<Metadata *, 8> Ops;
1930   Ops.reserve(T->getNumOperands());
1931   Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
1932   for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
1933     Ops.push_back(T->getOperand(I));
1934 
1935   return MDTuple::get(T->getContext(), Ops);
1936 }
1937 
1938 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
1939   auto *T = dyn_cast<MDTuple>(&N);
1940   if (!T)
1941     return &N;
1942 
1943   if (none_of(T->operands(), isOldLoopArgument))
1944     return &N;
1945 
1946   SmallVector<Metadata *, 8> Ops;
1947   Ops.reserve(T->getNumOperands());
1948   for (Metadata *MD : T->operands())
1949     Ops.push_back(upgradeLoopArgument(MD));
1950 
1951   return MDTuple::get(T->getContext(), Ops);
1952 }
1953