//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include "llvm/Support/X86TargetParser.h"
#include <sstream>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

static
int64_t clamp(int64_t Value, int64_t Low, int64_t High) {
  return std::min(High, std::max(Low, Value));
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.getName(BuiltinID) + 10;

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}
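// For illustration (a sketch of the expected lowering, not normative
// output): given `int x;`, a call such as `__sync_fetch_and_add(&x, 5)`
// reaches this helper with Kind == AtomicRMWInst::Add and emits roughly
//
//   %old = atomicrmw add i32* %x.addr, i32 5 seq_cst
//
// with %old (the pre-operation value) returned to the caller.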

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Value *Address = CGF.EmitScalarExpr(E->getArg(1));

  // Convert the type of the pointer to a pointer to the stored type.
  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  Value *BC = CGF.Builder.CreateBitCast(
      Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
  LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Address = CGF.EmitScalarExpr(E->getArg(0));

  LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, Args[0], Args[1], llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
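// A sketch of the difference from MakeBinaryAtomicValue: for
// `__sync_add_and_fetch(&x, 5)` the atomicrmw still yields the old value,
// so the helper re-applies the operation to produce the new value:
//
//   %old = atomicrmw add i32* %x.addr, i32 5 seq_cst
//   %new = add i32 %old, 5
//
// The Invert flag covers __sync_nand_and_fetch, whose result is
// ~(%old & %val), hence the trailing xor with all-ones.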

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: to lower Microsoft's _InterlockedCompareExchange* intrinsics,
/// invoke EmitAtomicCmpXchgForMSIntrin instead.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  Value *Args[3];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[2] = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Args[0], Args[1], Args[2], llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics, which have the following
/// signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// We therefore swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. This is also why we cannot reuse the utility
/// function MakeAtomicCmpXchgValue above: it expects its arguments in the
/// already-swapped order.
static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  auto *Destination = CGF.EmitScalarExpr(E->getArg(0));
  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  auto *Result = CGF.Builder.CreateAtomicCmpXchg(
                   Destination, Comparand, Exchange,
                   SuccessOrdering, FailureOrdering);
  Result->setVolatile(true);
  return CGF.Builder.CreateExtractValue(Result, 0);
}
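// Illustrative lowering (a sketch, not normative output):
// `_InterlockedCompareExchange(&Dest, Exchange, Comparand)` becomes, after
// the operand swap described above, approximately:
//
//   %pair = cmpxchg volatile i32* %Dest, i32 %Comparand, i32 %Exchange
//               seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//
// and %old is returned, matching MSVC's contract of returning the initial
// value of *Destination.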

// 64-bit Microsoft platforms support 128-bit cmpxchg operations, which are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *Destination = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  llvm::Value *ComparandPtr = CGF.EmitScalarExpr(E->getArg(3));

  assert(Destination->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());
  assert(ComparandPtr->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
  Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
  Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
                          CGF.getContext().toCharUnitsFromBits(128));

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandResult);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandResult);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}
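// Sketch of the emitted sequence (illustrative): the two 64-bit exchange
// halves are assembled into one i128, the comparand is loaded from (and the
// old value stored back through) _ComparandResult, and the success bit is
// widened to the i8 return value:
//
//   %xchg = or i128 (shl (zext %hi to i128), 64), (zext %lo to i128)
//   %pair = cmpxchg volatile i128* %dst, i128 %cmp, i128 %xchg ...
//   store i128 (extractvalue %pair, 0), i128* %cmp.result
//   %ret  = zext i1 (extractvalue %pair, 1) to i8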

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Add,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  auto *Result = CGF.Builder.CreateAtomicRMW(
                   AtomicRMWInst::Sub,
                   CGF.EmitScalarExpr(E->getArg(0)),
                   ConstantInt::get(IntTy, 1),
                   Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
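// For example (a sketch): `_InterlockedIncrement(&x)` reaches
// EmitAtomicIncrementValue and lowers to roughly
//
//   %old = atomicrmw add i32* %x, i32 1 seq_cst
//   %new = add i32 %old, 1
//
// returning the incremented value, as MSVC documents for these intrinsics.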

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), StoreSize.getQuantity() * 8);
  Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}
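// As an illustration of the pattern shared by these helpers (a sketch, not
// normative output): for `__builtin_sqrt(x)` this emits
// `call double @llvm.sqrt.f64(double %x)` in the default mode, but under
// strict FP semantics (e.g. -ffp-model=strict) it instead emits the
// constrained variant, approximately:
//
//   call double @llvm.experimental.constrained.sqrt.f64(double %x,
//       metadata !"round.dynamic", metadata !"fpexcept.strict")
//
// where CreateConstrainedFPCall supplies the rounding/exception metadata
// from the current FP options.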

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E, unsigned IntrinsicID,
                                 unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit an intrinsic where all operands are of the same type as the result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitCallMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                unsigned IntrinsicID,
                                                unsigned ConstrainedIntrinsicID,
                                                llvm::Type *Ty,
                                                ArrayRef<Value *> Args) {
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Ty);
    return CGF.Builder.CreateConstrainedFPCall(F, Args);
  }
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Ty);
  return CGF.Builder.CreateCall(F, Args);
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type.
static Value *emitUnaryBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, Src0);
}

// Emit an intrinsic that has 2 operands of the same type as its result.
static Value *emitBinaryBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E,
                                unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1 });
}

// Emit an intrinsic that has 3 operands of the same type as its result.
static Value *emitTernaryBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E,
                                 unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
}

// Emit an intrinsic that has 1 float or double operand, and 1 integer.
static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
                               const CallExpr *E,
                               unsigned IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has an overloaded integer result and an fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-endian, the high bits in big-endian. Therefore, on big-endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating the value in order to extract the higher-order
    // double, from which we will extract the sign.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}
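// Worked example (a sketch): for a plain `double`, this reduces to
//
//   %bits = bitcast double %v to i64
//   %sign = icmp slt i64 %bits, 0
//
// i.e. the sign bit is set exactly when the bit pattern, read as a signed
// integer, is negative.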

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}

static Value *emitRangedBuiltin(CodeGenFunction &CGF,
                                unsigned IntrinsicID,
                                int low, int high) {
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  llvm::MDNode *RNode = MDHelper.createRange(APInt(32, low), APInt(32, high));
  Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {});
  llvm::Instruction *Call = CGF.Builder.CreateCall(F);
  Call->setMetadata(llvm::LLVMContext::MD_range, RNode);
  return Call;
}

namespace {
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = Type->isBooleanType()  ? 1
                   : Type->isExtIntType() ? context.getIntWidth(Type)
                                          : context.getTypeInfo(Type).Width;
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types.  Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}
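// For example: EncompassingIntegerType({unsigned 32-bit, signed 32-bit})
// yields {33, signed}. The result must be signed because one input is, and
// a signed type needs 33 bits to represent every unsigned 32-bit value.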

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  llvm::Type *DestType = Int8PtrTy;
  if (ArgValue->getType() != DestType)
    ArgValue =
        Builder.CreateBitCast(ArgValue, DestType, ArgValue->getName().data());

  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue);
}

/// Checks whether using the result of __builtin_object_size(p, @p From) in
/// place of __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
///   - A llvm::Argument (if E is a param with the pass_object_size attribute on
///     it)
///   - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {
  // We need to reference an argument if the pointer is a parameter with the
  // pass_object_size attribute.
  if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
    auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
    auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
    if (Param != nullptr && PS != nullptr &&
        areBOSTypesCompatible(PS->getType(), Type)) {
      auto Iter = SizeArguments.find(Param);
      assert(Iter != SizeArguments.end());

      const ImplicitParamDecl *D = Iter->second;
      auto DIter = LocalDeclMap.find(D);
      assert(DIter != LocalDeclMap.end());

      return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
                              getContext().getSizeType(), E->getBeginLoc());
    }
  }

  // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate E for side-effects. In either case, we shouldn't lower to
  // @llvm.objectsize.
  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
  assert(Ptr->getType()->isPointerTy() &&
         "Non-pointer passed to __builtin_object_size?");

  Function *F =
      CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});

  // LLVM only supports 0 and 2, so make sure that we pass that along as a
  // boolean.
  Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  Value *NullIsUnknown = Builder.getTrue();
  Value *Dynamic = Builder.getInt1(IsDynamic);
  return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}
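// Illustrative result (a sketch): for `__builtin_object_size(p, 0)` on an
// i8* with no pass_object_size information, this emits approximately
//
//   call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false, i1 true, i1 false)
//
// i.e. Min=false for Type 0/1 (maximum remaining size), NullIsUnknown=true
// for GCC compatibility, and Dynamic=false for the non-_dynamic builtin.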

namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit;

  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};
} // namespace

BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
    // Main portable variants.
  case Builtin::BI_bittest:
    return {TestOnly, Unlocked, false};
  case Builtin::BI_bittestandcomplement:
    return {Complement, Unlocked, false};
  case Builtin::BI_bittestandreset:
    return {Reset, Unlocked, false};
  case Builtin::BI_bittestandset:
    return {Set, Unlocked, false};
  case Builtin::BI_interlockedbittestandreset:
    return {Reset, Sequential, false};
  case Builtin::BI_interlockedbittestandset:
    return {Set, Sequential, false};

    // X86-specific 64-bit variants.
  case Builtin::BI_bittest64:
    return {TestOnly, Unlocked, true};
  case Builtin::BI_bittestandcomplement64:
    return {Complement, Unlocked, true};
  case Builtin::BI_bittestandreset64:
    return {Reset, Unlocked, true};
  case Builtin::BI_bittestandset64:
    return {Set, Unlocked, true};
  case Builtin::BI_interlockedbittestandreset64:
    return {Reset, Sequential, true};
  case Builtin::BI_interlockedbittestandset64:
    return {Set, Sequential, true};

    // ARM/AArch64-specific ordering variants.
  case Builtin::BI_interlockedbittestandset_acq:
    return {Set, Acquire, false};
  case Builtin::BI_interlockedbittestandset_rel:
    return {Set, Release, false};
  case Builtin::BI_interlockedbittestandset_nf:
    return {Set, NoFence, false};
  case Builtin::BI_interlockedbittestandreset_acq:
    return {Reset, Acquire, false};
  case Builtin::BI_interlockedbittestandreset_rel:
    return {Reset, Release, false};
  case Builtin::BI_interlockedbittestandreset_nf:
    return {Reset, NoFence, false};
  }
  llvm_unreachable("expected only bittest intrinsics");
}

static char bitActionToX86BTCode(BitTest::ActionKind A) {
  switch (A) {
  case BitTest::TestOnly:   return '\0';
  case BitTest::Complement: return 'c';
  case BitTest::Reset:      return 'r';
  case BitTest::Set:        return 's';
  }
  llvm_unreachable("invalid action");
}

static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
                                            BitTest BT,
                                            const CallExpr *E, Value *BitBase,
                                            Value *BitPos) {
  char Action = bitActionToX86BTCode(BT.Action);
  char SizeSuffix = BT.Is64Bit ? 'q' : 'l';

  // Build the assembly.
  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  if (BT.Interlocking != BitTest::Unlocked)
    AsmOS << "lock ";
  AsmOS << "bt";
  if (Action)
    AsmOS << Action;
  AsmOS << SizeSuffix << " $2, ($1)";

  // Build the constraints. FIXME: We should support immediates when possible.
  std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
  std::string MachineClobbers = CGF.getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    Constraints += ',';
    Constraints += MachineClobbers;
  }
  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(),
      CGF.getContext().getTypeSize(E->getArg(1)->getType()));
  llvm::Type *IntPtrType = IntType->getPointerTo();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(CGF.Int8Ty, {IntPtrType, IntType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
}
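// Example output (a sketch): `_interlockedbittestandset(base, idx)` on x86
// produces inline asm along the lines of
//
//   lock btsl $2, ($1)
//
// with constraints "={@ccc},r,r,~{cc},~{memory}", so the tested bit comes
// back through the carry flag as the i8 result.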

static llvm::AtomicOrdering
getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
  switch (I) {
  case BitTest::Unlocked:   return llvm::AtomicOrdering::NotAtomic;
  case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
  case BitTest::Acquire:    return llvm::AtomicOrdering::Acquire;
  case BitTest::Release:    return llvm::AtomicOrdering::Release;
  case BitTest::NoFence:    return llvm::AtomicOrdering::Monotonic;
  }
  llvm_unreachable("invalid interlocking");
}

/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {
  Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
  Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));

  BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);

  // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
  // indexing operation internally. Use them if possible.
  if (CGF.getTarget().getTriple().isX86())
    return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);

  // Otherwise, use generic code to load one byte and test the bit. Use all but
  // the bottom three bits as the array index, and the bottom three bits to form
  // a mask.
  // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
  Value *ByteIndex = CGF.Builder.CreateAShr(
      BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
  Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
  Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
                                                 ByteIndex, "bittest.byteaddr"),
                   CharUnits::One());
  Value *PosLow =
      CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
                            llvm::ConstantInt::get(CGF.Int8Ty, 0x7));

  // The updating instructions will need a mask.
  Value *Mask = nullptr;
  if (BT.Action != BitTest::TestOnly) {
    Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
                                 "bittest.mask");
  }

  // Check the action and ordering of the interlocked intrinsics.
  llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);

  Value *OldByte = nullptr;
  if (Ordering != llvm::AtomicOrdering::NotAtomic) {
    // Emit a combined atomicrmw load/store operation for the interlocked
    // intrinsics.
    llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
    if (BT.Action == BitTest::Reset) {
      Mask = CGF.Builder.CreateNot(Mask);
      RMWOp = llvm::AtomicRMWInst::And;
    }
    OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr.getPointer(), Mask,
                                          Ordering);
  } else {
    // Emit a plain load for the non-interlocked intrinsics.
    OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
    Value *NewByte = nullptr;
    switch (BT.Action) {
    case BitTest::TestOnly:
      // Don't store anything.
      break;
    case BitTest::Complement:
      NewByte = CGF.Builder.CreateXor(OldByte, Mask);
      break;
    case BitTest::Reset:
      NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
      break;
    case BitTest::Set:
      NewByte = CGF.Builder.CreateOr(OldByte, Mask);
      break;
    }
    if (NewByte)
      CGF.Builder.CreateStore(NewByte, ByteAddr);
  }

  // However we loaded the old byte, either by plain load or atomicrmw, shift
  // the bit into the low position and mask it to 0 or 1.
  Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
  return CGF.Builder.CreateAnd(
      ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}

static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
                                                unsigned BuiltinID,
                                                const CallExpr *E) {
  Value *Addr = CGF.EmitScalarExpr(E->getArg(0));

  SmallString<64> Asm;
  raw_svector_ostream AsmOS(Asm);
  llvm::IntegerType *RetType = CGF.Int32Ty;

  switch (BuiltinID) {
  case clang::PPC::BI__builtin_ppc_ldarx:
    AsmOS << "ldarx ";
    RetType = CGF.Int64Ty;
    break;
  case clang::PPC::BI__builtin_ppc_lwarx:
    AsmOS << "lwarx ";
    RetType = CGF.Int32Ty;
    break;
  case clang::PPC::BI__builtin_ppc_lharx:
    AsmOS << "lharx ";
    RetType = CGF.Int16Ty;
    break;
  case clang::PPC::BI__builtin_ppc_lbarx:
    AsmOS << "lbarx ";
    RetType = CGF.Int8Ty;
    break;
  default:
    llvm_unreachable("Expected only PowerPC load reserve intrinsics");
  }

  AsmOS << "$0, ${1:y}";

  std::string Constraints = "=r,*Z,~{memory}";
  std::string MachineClobbers = CGF.getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    Constraints += ',';
    Constraints += MachineClobbers;
  }

  llvm::Type *IntPtrType = RetType->getPointerTo();
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(RetType, {IntPtrType}, false);

  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
  return CGF.Builder.CreateCall(IA, {Addr});
}

namespace {
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
}

/// MSVC handles setjmp a bit differently on different platforms. On every
/// architecture except 32-bit x86, the frame address is passed. On x86, extra
/// parameters can be passed as variadic arguments, but we always pass none.
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
                               const CallExpr *E) {
  llvm::Value *Arg1 = nullptr;
  llvm::Type *Arg1Ty = nullptr;
  StringRef Name;
  bool IsVarArg = false;
  if (SJKind == MSVCSetJmpKind::_setjmp3) {
    Name = "_setjmp3";
    Arg1Ty = CGF.Int32Ty;
    Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
    IsVarArg = true;
  } else {
    Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
    Arg1Ty = CGF.Int8PtrTy;
    if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
    } else
      Arg1 = CGF.Builder.CreateCall(
          CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
          llvm::ConstantInt::get(CGF.Int32Ty, 0));
  }

  // Mark the call site and declaration with ReturnsTwice.
  llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
  llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
      llvm::Attribute::ReturnsTwice);
  llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
      llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
      ReturnsTwiceAttr, /*Local=*/true);

  llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
      CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
  llvm::Value *Args[] = {Buf, Arg1};
  llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
  CB->setAttributes(ReturnsTwiceAttr);
  return RValue::get(CB);
}
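// Illustration (a sketch): for the _setjmpex flavor on x64 this emits
// roughly
//
//   %fa  = call i8* @llvm.frameaddress.p0i8(i32 0)
//   %ret = call i32 @_setjmpex(i8* %buf, i8* %fa)   ; returns_twice
//
// while on AArch64 the frame argument comes from @llvm.sponentry instead,
// and _setjmp3 on 32-bit x86 passes a literal 0 argument count.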

// Many MSVC builtins are available on x64, ARM, and AArch64; to avoid
// repeating code, we handle them here.
enum class CodeGenFunction::MSVCIntrin {
  _BitScanForward,
  _BitScanReverse,
  _InterlockedAnd,
  _InterlockedDecrement,
  _InterlockedExchange,
  _InterlockedExchangeAdd,
  _InterlockedExchangeSub,
  _InterlockedIncrement,
  _InterlockedOr,
  _InterlockedXor,
  _InterlockedExchangeAdd_acq,
  _InterlockedExchangeAdd_rel,
  _InterlockedExchangeAdd_nf,
  _InterlockedExchange_acq,
  _InterlockedExchange_rel,
  _InterlockedExchange_nf,
  _InterlockedCompareExchange_acq,
  _InterlockedCompareExchange_rel,
  _InterlockedCompareExchange_nf,
  _InterlockedCompareExchange128,
  _InterlockedCompareExchange128_acq,
  _InterlockedCompareExchange128_rel,
  _InterlockedCompareExchange128_nf,
  _InterlockedOr_acq,
  _InterlockedOr_rel,
  _InterlockedOr_nf,
  _InterlockedXor_acq,
  _InterlockedXor_rel,
  _InterlockedXor_nf,
  _InterlockedAnd_acq,
  _InterlockedAnd_rel,
  _InterlockedAnd_nf,
  _InterlockedIncrement_acq,
  _InterlockedIncrement_rel,
  _InterlockedIncrement_nf,
  _InterlockedDecrement_acq,
  _InterlockedDecrement_rel,
  _InterlockedDecrement_nf,
  __fastfail,
};

static Optional<CodeGenFunction::MSVCIntrin>
translateArmToMsvcIntrin(unsigned BuiltinID) {
  using MSVCIntrin = CodeGenFunction::MSVCIntrin;
  switch (BuiltinID) {
  default:
    return None;
  case ARM::BI_BitScanForward:
  case ARM::BI_BitScanForward64:
    return MSVCIntrin::_BitScanForward;
  case ARM::BI_BitScanReverse:
  case ARM::BI_BitScanReverse64:
    return MSVCIntrin::_BitScanReverse;
  case ARM::BI_InterlockedAnd64:
    return MSVCIntrin::_InterlockedAnd;
  case ARM::BI_InterlockedExchange64:
    return MSVCIntrin::_InterlockedExchange;
  case ARM::BI_InterlockedExchangeAdd64:
    return MSVCIntrin::_InterlockedExchangeAdd;
  case ARM::BI_InterlockedExchangeSub64:
    return MSVCIntrin::_InterlockedExchangeSub;
  case ARM::BI_InterlockedOr64:
    return MSVCIntrin::_InterlockedOr;
  case ARM::BI_InterlockedXor64:
    return MSVCIntrin::_InterlockedXor;
  case ARM::BI_InterlockedDecrement64:
    return MSVCIntrin::_InterlockedDecrement;
  case ARM::BI_InterlockedIncrement64:
    return MSVCIntrin::_InterlockedIncrement;
  case ARM::BI_InterlockedExchangeAdd8_acq:
  case ARM::BI_InterlockedExchangeAdd16_acq:
  case ARM::BI_InterlockedExchangeAdd_acq:
  case ARM::BI_InterlockedExchangeAdd64_acq:
    return MSVCIntrin::_InterlockedExchangeAdd_acq;
  case ARM::BI_InterlockedExchangeAdd8_rel:
  case ARM::BI_InterlockedExchangeAdd16_rel:
  case ARM::BI_InterlockedExchangeAdd_rel:
  case ARM::BI_InterlockedExchangeAdd64_rel:
    return MSVCIntrin::_InterlockedExchangeAdd_rel;
  case ARM::BI_InterlockedExchangeAdd8_nf:
  case ARM::BI_InterlockedExchangeAdd16_nf:
  case ARM::BI_InterlockedExchangeAdd_nf:
  case ARM::BI_InterlockedExchangeAdd64_nf:
    return MSVCIntrin::_InterlockedExchangeAdd_nf;
  case ARM::BI_InterlockedExchange8_acq:
  case ARM::BI_InterlockedExchange16_acq:
  case ARM::BI_InterlockedExchange_acq:
  case ARM::BI_InterlockedExchange64_acq:
    return MSVCIntrin::_InterlockedExchange_acq;
  case ARM::BI_InterlockedExchange8_rel:
  case ARM::BI_InterlockedExchange16_rel:
  case ARM::BI_InterlockedExchange_rel:
  case ARM::BI_InterlockedExchange64_rel:
    return MSVCIntrin::_InterlockedExchange_rel;
  case ARM::BI_InterlockedExchange8_nf:
  case ARM::BI_InterlockedExchange16_nf:
  case ARM::BI_InterlockedExchange_nf:
  case ARM::BI_InterlockedExchange64_nf:
    return MSVCIntrin::_InterlockedExchange_nf;
  case ARM::BI_InterlockedCompareExchange8_acq:
  case ARM::BI_InterlockedCompareExchange16_acq:
  case ARM::BI_InterlockedCompareExchange_acq:
  case ARM::BI_InterlockedCompareExchange64_acq:
    return MSVCIntrin::_InterlockedCompareExchange_acq;
  case ARM::BI_InterlockedCompareExchange8_rel:
  case ARM::BI_InterlockedCompareExchange16_rel:
  case ARM::BI_InterlockedCompareExchange_rel:
  case ARM::BI_InterlockedCompareExchange64_rel:
    return MSVCIntrin::_InterlockedCompareExchange_rel;
  case ARM::BI_InterlockedCompareExchange8_nf:
  case ARM::BI_InterlockedCompareExchange16_nf:
  case ARM::BI_InterlockedCompareExchange_nf:
  case ARM::BI_InterlockedCompareExchange64_nf:
    return MSVCIntrin::_InterlockedCompareExchange_nf;
  case ARM::BI_InterlockedOr8_acq:
  case ARM::BI_InterlockedOr16_acq:
  case ARM::BI_InterlockedOr_acq:
  case ARM::BI_InterlockedOr64_acq:
    return MSVCIntrin::_InterlockedOr_acq;
  case ARM::BI_InterlockedOr8_rel:
  case ARM::BI_InterlockedOr16_rel:
  case ARM::BI_InterlockedOr_rel:
  case ARM::BI_InterlockedOr64_rel:
    return MSVCIntrin::_InterlockedOr_rel;
  case ARM::BI_InterlockedOr8_nf:
  case ARM::BI_InterlockedOr16_nf:
  case ARM::BI_InterlockedOr_nf:
  case ARM::BI_InterlockedOr64_nf:
    return MSVCIntrin::_InterlockedOr_nf;
  case ARM::BI_InterlockedXor8_acq:
  case ARM::BI_InterlockedXor16_acq:
  case ARM::BI_InterlockedXor_acq:
  case ARM::BI_InterlockedXor64_acq:
    return MSVCIntrin::_InterlockedXor_acq;
  case ARM::BI_InterlockedXor8_rel:
  case ARM::BI_InterlockedXor16_rel:
  case ARM::BI_InterlockedXor_rel:
  case ARM::BI_InterlockedXor64_rel:
    return MSVCIntrin::_InterlockedXor_rel;
  case ARM::BI_InterlockedXor8_nf:
  case ARM::BI_InterlockedXor16_nf:
  case ARM::BI_InterlockedXor_nf:
  case ARM::BI_InterlockedXor64_nf:
    return MSVCIntrin::_InterlockedXor_nf;
  case ARM::BI_InterlockedAnd8_acq:
  case ARM::BI_InterlockedAnd16_acq:
  case ARM::BI_InterlockedAnd_acq:
  case ARM::BI_InterlockedAnd64_acq:
    return MSVCIntrin::_InterlockedAnd_acq;
  case ARM::BI_InterlockedAnd8_rel:
  case ARM::BI_InterlockedAnd16_rel:
  case ARM::BI_InterlockedAnd_rel:
  case ARM::BI_InterlockedAnd64_rel:
    return MSVCIntrin::_InterlockedAnd_rel;
  case ARM::BI_InterlockedAnd8_nf:
  case ARM::BI_InterlockedAnd16_nf:
  case ARM::BI_InterlockedAnd_nf:
  case ARM::BI_InterlockedAnd64_nf:
    return MSVCIntrin::_InterlockedAnd_nf;
  case ARM::BI_InterlockedIncrement16_acq:
  case ARM::BI_InterlockedIncrement_acq:
  case ARM::BI_InterlockedIncrement64_acq:
    return MSVCIntrin::_InterlockedIncrement_acq;
  case ARM::BI_InterlockedIncrement16_rel:
  case ARM::BI_InterlockedIncrement_rel:
  case ARM::BI_InterlockedIncrement64_rel:
    return MSVCIntrin::_InterlockedIncrement_rel;
  case ARM::BI_InterlockedIncrement16_nf:
  case ARM::BI_InterlockedIncrement_nf:
  case ARM::BI_InterlockedIncrement64_nf:
    return MSVCIntrin::_InterlockedIncrement_nf;
  case ARM::BI_InterlockedDecrement16_acq:
  case ARM::BI_InterlockedDecrement_acq:
  case ARM::BI_InterlockedDecrement64_acq:
    return MSVCIntrin::_InterlockedDecrement_acq;
  case ARM::BI_InterlockedDecrement16_rel:
  case ARM::BI_InterlockedDecrement_rel:
  case ARM::BI_InterlockedDecrement64_rel:
    return MSVCIntrin::_InterlockedDecrement_rel;
  case ARM::BI_InterlockedDecrement16_nf:
  case ARM::BI_InterlockedDecrement_nf:
  case ARM::BI_InterlockedDecrement64_nf:
    return MSVCIntrin::_InterlockedDecrement_nf;
  }
  llvm_unreachable("must return from switch");
}
1285 
1286 static Optional<CodeGenFunction::MSVCIntrin>
1287 translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
1288   using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1289   switch (BuiltinID) {
1290   default:
1291     return None;
1292   case AArch64::BI_BitScanForward:
1293   case AArch64::BI_BitScanForward64:
1294     return MSVCIntrin::_BitScanForward;
1295   case AArch64::BI_BitScanReverse:
1296   case AArch64::BI_BitScanReverse64:
1297     return MSVCIntrin::_BitScanReverse;
1298   case AArch64::BI_InterlockedAnd64:
1299     return MSVCIntrin::_InterlockedAnd;
1300   case AArch64::BI_InterlockedExchange64:
1301     return MSVCIntrin::_InterlockedExchange;
1302   case AArch64::BI_InterlockedExchangeAdd64:
1303     return MSVCIntrin::_InterlockedExchangeAdd;
1304   case AArch64::BI_InterlockedExchangeSub64:
1305     return MSVCIntrin::_InterlockedExchangeSub;
1306   case AArch64::BI_InterlockedOr64:
1307     return MSVCIntrin::_InterlockedOr;
1308   case AArch64::BI_InterlockedXor64:
1309     return MSVCIntrin::_InterlockedXor;
1310   case AArch64::BI_InterlockedDecrement64:
1311     return MSVCIntrin::_InterlockedDecrement;
1312   case AArch64::BI_InterlockedIncrement64:
1313     return MSVCIntrin::_InterlockedIncrement;
1314   case AArch64::BI_InterlockedExchangeAdd8_acq:
1315   case AArch64::BI_InterlockedExchangeAdd16_acq:
1316   case AArch64::BI_InterlockedExchangeAdd_acq:
1317   case AArch64::BI_InterlockedExchangeAdd64_acq:
1318     return MSVCIntrin::_InterlockedExchangeAdd_acq;
1319   case AArch64::BI_InterlockedExchangeAdd8_rel:
1320   case AArch64::BI_InterlockedExchangeAdd16_rel:
1321   case AArch64::BI_InterlockedExchangeAdd_rel:
1322   case AArch64::BI_InterlockedExchangeAdd64_rel:
1323     return MSVCIntrin::_InterlockedExchangeAdd_rel;
1324   case AArch64::BI_InterlockedExchangeAdd8_nf:
1325   case AArch64::BI_InterlockedExchangeAdd16_nf:
1326   case AArch64::BI_InterlockedExchangeAdd_nf:
1327   case AArch64::BI_InterlockedExchangeAdd64_nf:
1328     return MSVCIntrin::_InterlockedExchangeAdd_nf;
1329   case AArch64::BI_InterlockedExchange8_acq:
1330   case AArch64::BI_InterlockedExchange16_acq:
1331   case AArch64::BI_InterlockedExchange_acq:
1332   case AArch64::BI_InterlockedExchange64_acq:
1333     return MSVCIntrin::_InterlockedExchange_acq;
1334   case AArch64::BI_InterlockedExchange8_rel:
1335   case AArch64::BI_InterlockedExchange16_rel:
1336   case AArch64::BI_InterlockedExchange_rel:
1337   case AArch64::BI_InterlockedExchange64_rel:
1338     return MSVCIntrin::_InterlockedExchange_rel;
1339   case AArch64::BI_InterlockedExchange8_nf:
1340   case AArch64::BI_InterlockedExchange16_nf:
1341   case AArch64::BI_InterlockedExchange_nf:
1342   case AArch64::BI_InterlockedExchange64_nf:
1343     return MSVCIntrin::_InterlockedExchange_nf;
1344   case AArch64::BI_InterlockedCompareExchange8_acq:
1345   case AArch64::BI_InterlockedCompareExchange16_acq:
1346   case AArch64::BI_InterlockedCompareExchange_acq:
1347   case AArch64::BI_InterlockedCompareExchange64_acq:
1348     return MSVCIntrin::_InterlockedCompareExchange_acq;
1349   case AArch64::BI_InterlockedCompareExchange8_rel:
1350   case AArch64::BI_InterlockedCompareExchange16_rel:
1351   case AArch64::BI_InterlockedCompareExchange_rel:
1352   case AArch64::BI_InterlockedCompareExchange64_rel:
1353     return MSVCIntrin::_InterlockedCompareExchange_rel;
1354   case AArch64::BI_InterlockedCompareExchange8_nf:
1355   case AArch64::BI_InterlockedCompareExchange16_nf:
1356   case AArch64::BI_InterlockedCompareExchange_nf:
1357   case AArch64::BI_InterlockedCompareExchange64_nf:
1358     return MSVCIntrin::_InterlockedCompareExchange_nf;
1359   case AArch64::BI_InterlockedCompareExchange128:
1360     return MSVCIntrin::_InterlockedCompareExchange128;
1361   case AArch64::BI_InterlockedCompareExchange128_acq:
1362     return MSVCIntrin::_InterlockedCompareExchange128_acq;
1363   case AArch64::BI_InterlockedCompareExchange128_nf:
1364     return MSVCIntrin::_InterlockedCompareExchange128_nf;
1365   case AArch64::BI_InterlockedCompareExchange128_rel:
1366     return MSVCIntrin::_InterlockedCompareExchange128_rel;
1367   case AArch64::BI_InterlockedOr8_acq:
1368   case AArch64::BI_InterlockedOr16_acq:
1369   case AArch64::BI_InterlockedOr_acq:
1370   case AArch64::BI_InterlockedOr64_acq:
1371     return MSVCIntrin::_InterlockedOr_acq;
1372   case AArch64::BI_InterlockedOr8_rel:
1373   case AArch64::BI_InterlockedOr16_rel:
1374   case AArch64::BI_InterlockedOr_rel:
1375   case AArch64::BI_InterlockedOr64_rel:
1376     return MSVCIntrin::_InterlockedOr_rel;
1377   case AArch64::BI_InterlockedOr8_nf:
1378   case AArch64::BI_InterlockedOr16_nf:
1379   case AArch64::BI_InterlockedOr_nf:
1380   case AArch64::BI_InterlockedOr64_nf:
1381     return MSVCIntrin::_InterlockedOr_nf;
1382   case AArch64::BI_InterlockedXor8_acq:
1383   case AArch64::BI_InterlockedXor16_acq:
1384   case AArch64::BI_InterlockedXor_acq:
1385   case AArch64::BI_InterlockedXor64_acq:
1386     return MSVCIntrin::_InterlockedXor_acq;
1387   case AArch64::BI_InterlockedXor8_rel:
1388   case AArch64::BI_InterlockedXor16_rel:
1389   case AArch64::BI_InterlockedXor_rel:
1390   case AArch64::BI_InterlockedXor64_rel:
1391     return MSVCIntrin::_InterlockedXor_rel;
1392   case AArch64::BI_InterlockedXor8_nf:
1393   case AArch64::BI_InterlockedXor16_nf:
1394   case AArch64::BI_InterlockedXor_nf:
1395   case AArch64::BI_InterlockedXor64_nf:
1396     return MSVCIntrin::_InterlockedXor_nf;
1397   case AArch64::BI_InterlockedAnd8_acq:
1398   case AArch64::BI_InterlockedAnd16_acq:
1399   case AArch64::BI_InterlockedAnd_acq:
1400   case AArch64::BI_InterlockedAnd64_acq:
1401     return MSVCIntrin::_InterlockedAnd_acq;
1402   case AArch64::BI_InterlockedAnd8_rel:
1403   case AArch64::BI_InterlockedAnd16_rel:
1404   case AArch64::BI_InterlockedAnd_rel:
1405   case AArch64::BI_InterlockedAnd64_rel:
1406     return MSVCIntrin::_InterlockedAnd_rel;
1407   case AArch64::BI_InterlockedAnd8_nf:
1408   case AArch64::BI_InterlockedAnd16_nf:
1409   case AArch64::BI_InterlockedAnd_nf:
1410   case AArch64::BI_InterlockedAnd64_nf:
1411     return MSVCIntrin::_InterlockedAnd_nf;
1412   case AArch64::BI_InterlockedIncrement16_acq:
1413   case AArch64::BI_InterlockedIncrement_acq:
1414   case AArch64::BI_InterlockedIncrement64_acq:
1415     return MSVCIntrin::_InterlockedIncrement_acq;
1416   case AArch64::BI_InterlockedIncrement16_rel:
1417   case AArch64::BI_InterlockedIncrement_rel:
1418   case AArch64::BI_InterlockedIncrement64_rel:
1419     return MSVCIntrin::_InterlockedIncrement_rel;
1420   case AArch64::BI_InterlockedIncrement16_nf:
1421   case AArch64::BI_InterlockedIncrement_nf:
1422   case AArch64::BI_InterlockedIncrement64_nf:
1423     return MSVCIntrin::_InterlockedIncrement_nf;
1424   case AArch64::BI_InterlockedDecrement16_acq:
1425   case AArch64::BI_InterlockedDecrement_acq:
1426   case AArch64::BI_InterlockedDecrement64_acq:
1427     return MSVCIntrin::_InterlockedDecrement_acq;
1428   case AArch64::BI_InterlockedDecrement16_rel:
1429   case AArch64::BI_InterlockedDecrement_rel:
1430   case AArch64::BI_InterlockedDecrement64_rel:
1431     return MSVCIntrin::_InterlockedDecrement_rel;
1432   case AArch64::BI_InterlockedDecrement16_nf:
1433   case AArch64::BI_InterlockedDecrement_nf:
1434   case AArch64::BI_InterlockedDecrement64_nf:
1435     return MSVCIntrin::_InterlockedDecrement_nf;
1436   }
1437   llvm_unreachable("must return from switch");
1438 }
1439 
1440 static Optional<CodeGenFunction::MSVCIntrin>
1441 translateX86ToMsvcIntrin(unsigned BuiltinID) {
1442   using MSVCIntrin = CodeGenFunction::MSVCIntrin;
1443   switch (BuiltinID) {
1444   default:
1445     return None;
1446   case clang::X86::BI_BitScanForward:
1447   case clang::X86::BI_BitScanForward64:
1448     return MSVCIntrin::_BitScanForward;
1449   case clang::X86::BI_BitScanReverse:
1450   case clang::X86::BI_BitScanReverse64:
1451     return MSVCIntrin::_BitScanReverse;
1452   case clang::X86::BI_InterlockedAnd64:
1453     return MSVCIntrin::_InterlockedAnd;
1454   case clang::X86::BI_InterlockedCompareExchange128:
1455     return MSVCIntrin::_InterlockedCompareExchange128;
1456   case clang::X86::BI_InterlockedExchange64:
1457     return MSVCIntrin::_InterlockedExchange;
1458   case clang::X86::BI_InterlockedExchangeAdd64:
1459     return MSVCIntrin::_InterlockedExchangeAdd;
1460   case clang::X86::BI_InterlockedExchangeSub64:
1461     return MSVCIntrin::_InterlockedExchangeSub;
1462   case clang::X86::BI_InterlockedOr64:
1463     return MSVCIntrin::_InterlockedOr;
1464   case clang::X86::BI_InterlockedXor64:
1465     return MSVCIntrin::_InterlockedXor;
1466   case clang::X86::BI_InterlockedDecrement64:
1467     return MSVCIntrin::_InterlockedDecrement;
1468   case clang::X86::BI_InterlockedIncrement64:
1469     return MSVCIntrin::_InterlockedIncrement;
1470   }
1471   llvm_unreachable("must return from switch");
1472 }
1473 
1474 // Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
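// The interlocked builtins encode their memory ordering in a name suffix:
// '_acq' maps to Acquire, '_rel' to Release, and '_nf' ("no fence") to
// Monotonic, while the unsuffixed forms are sequentially consistent. For
// example, _InterlockedOr8_acq lowers to an 'atomicrmw or ... acquire' on the
// 8-bit operand via MakeBinaryAtomicValue below.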
Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
                                            const CallExpr *E) {
  switch (BuiltinID) {
  case MSVCIntrin::_BitScanForward:
  case MSVCIntrin::_BitScanReverse: {
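    // _BitScanForward(&Index, Mask) and _BitScanReverse(&Index, Mask) return
    // 0 and leave Index unchanged when Mask is zero; otherwise they store the
    // position of the lowest (resp. highest) set bit and return 1. E.g. for
    // _BitScanForward with Mask == 0x10, Index is set to 4. The control flow
    // built below branches around the cttz/ctlz call to implement the
    // zero-input check.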
    Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
    Value *ArgValue = EmitScalarExpr(E->getArg(1));

    llvm::Type *ArgType = ArgValue->getType();
    llvm::Type *IndexType =
        IndexAddress.getPointer()->getType()->getPointerElementType();
    llvm::Type *ResultType = ConvertType(E->getType());

    Value *ArgZero = llvm::Constant::getNullValue(ArgType);
    Value *ResZero = llvm::Constant::getNullValue(ResultType);
    Value *ResOne = llvm::ConstantInt::get(ResultType, 1);

    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");

    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
    BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ResZero, Begin);

    Builder.SetInsertPoint(NotZero);

    if (BuiltinID == MSVCIntrin::_BitScanForward) {
      Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Builder.CreateStore(ZeroCount, IndexAddress, false);
    } else {
      unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
      Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);

      Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
      Builder.CreateStore(Index, IndexAddress, false);
    }
    Builder.CreateBr(End);
    Result->addIncoming(ResOne, NotZero);

    Builder.SetInsertPoint(End);
    return Result;
  }
  case MSVCIntrin::_InterlockedAnd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
  case MSVCIntrin::_InterlockedExchange:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
  case MSVCIntrin::_InterlockedExchangeAdd:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
  case MSVCIntrin::_InterlockedExchangeSub:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
  case MSVCIntrin::_InterlockedOr:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
  case MSVCIntrin::_InterlockedXor:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
  case MSVCIntrin::_InterlockedExchangeAdd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchangeAdd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchangeAdd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedExchange_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedExchange_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedExchange_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedCompareExchange_acq:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedCompareExchange_rel:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedCompareExchange_nf:
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedCompareExchange128:
    return EmitAtomicCmpXchg128ForMSIntrin(
        *this, E, AtomicOrdering::SequentiallyConsistent);
  case MSVCIntrin::_InterlockedCompareExchange128_acq:
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedCompareExchange128_rel:
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedCompareExchange128_nf:
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedOr_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedOr_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedOr_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedXor_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedXor_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedXor_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedAnd_acq:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedAnd_rel:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedAnd_nf:
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedIncrement_acq:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedIncrement_rel:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedIncrement_nf:
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
  case MSVCIntrin::_InterlockedDecrement_acq:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
  case MSVCIntrin::_InterlockedDecrement_rel:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
  case MSVCIntrin::_InterlockedDecrement_nf:
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);

  case MSVCIntrin::_InterlockedDecrement:
    return EmitAtomicDecrementValue(*this, E);
  case MSVCIntrin::_InterlockedIncrement:
    return EmitAtomicIncrementValue(*this, E);

  case MSVCIntrin::__fastfail: {
    // Request immediate process termination from the kernel. The instruction
    // sequences to do this are documented on MSDN:
    // https://msdn.microsoft.com/en-us/library/dn774154.aspx
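    // For example, on x86 '__fastfail(code)' lowers to 'int $0x29' with the
    // failure code in ECX, and the call is marked noreturn since the process
    // terminates.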
    llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
    StringRef Asm, Constraints;
    switch (ISA) {
    default:
      ErrorUnsupported(E, "__fastfail call for this architecture");
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      Asm = "int $$0x29";
      Constraints = "{cx}";
      break;
    case llvm::Triple::thumb:
      Asm = "udf #251";
      Constraints = "{r0}";
      break;
    case llvm::Triple::aarch64:
      Asm = "brk #0xF003";
      Constraints = "{w0}";
    }
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
    llvm::InlineAsm *IA =
        llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
    llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
        getLLVMContext(), llvm::AttributeList::FunctionIndex,
        llvm::Attribute::NoReturn);
    llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
    CI->setAttributes(NoReturnAttr);
    return CI;
  }
  }
  llvm_unreachable("Incorrect MSVC intrinsic!");
}

namespace {
// ARC cleanup for __builtin_os_log_format
struct CallObjCArcUse final : EHScopeStack::Cleanup {
  CallObjCArcUse(llvm::Value *object) : object(object) {}
  llvm::Value *object;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitARCIntrinsicUse(object);
  }
};
} // namespace

Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
                                                 BuiltinCheckKind Kind) {
  assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero)
          && "Unsupported builtin check kind");

  Value *ArgValue = EmitScalarExpr(E);
  if (!SanOpts.has(SanitizerKind::Builtin) || !getTarget().isCLZForZeroUndef())
    return ArgValue;

  SanitizerScope SanScope(this);
  Value *Cond = Builder.CreateICmpNE(
      ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
  EmitCheck(std::make_pair(Cond, SanitizerKind::Builtin),
            SanitizerHandler::InvalidBuiltin,
            {EmitCheckSourceLocation(E->getExprLoc()),
             llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
            None);
  return ArgValue;
}

/// Get the argument type for arguments to os_log_helper.
static CanQualType getOSLogArgType(ASTContext &C, int Size) {
  QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
  return C.getCanonicalType(UnsignedTy);
}

llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
    const analyze_os_log::OSLogBufferLayout &Layout,
    CharUnits BufferAlignment) {
  ASTContext &Ctx = getContext();

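  // The buffer layout is mangled into the helper's name so that calls with
  // identical layouts share a single linkonce_odr definition. For example, a
  // hypothetical buffer aligned to 8 bytes with summary byte 3, one argument,
  // and a single 4-byte item with descriptor byte 0 produces
  // "__os_log_helper_8_3_1_4_0".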
  llvm::SmallString<64> Name;
  {
    raw_svector_ostream OS(Name);
    OS << "__os_log_helper";
    OS << "_" << BufferAlignment.getQuantity();
    OS << "_" << int(Layout.getSummaryByte());
    OS << "_" << int(Layout.getNumArgsByte());
    for (const auto &Item : Layout.Items)
      OS << "_" << int(Item.getSizeByte()) << "_"
         << int(Item.getDescriptorByte());
  }

  if (llvm::Function *F = CGM.getModule().getFunction(Name))
    return F;

  llvm::SmallVector<QualType, 4> ArgTys;
  FunctionArgList Args;
  Args.push_back(ImplicitParamDecl::Create(
      Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
      ImplicitParamDecl::Other));
  ArgTys.emplace_back(Ctx.VoidPtrTy);

  for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
    char Size = Layout.Items[I].getSizeByte();
    if (!Size)
      continue;

    QualType ArgTy = getOSLogArgType(Ctx, Size);
    Args.push_back(ImplicitParamDecl::Create(
        Ctx, nullptr, SourceLocation(),
        &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
        ImplicitParamDecl::Other));
    ArgTys.emplace_back(ArgTy);
  }

  QualType ReturnTy = Ctx.VoidTy;

  // The helper function has linkonce_odr linkage to enable the linker to merge
  // identical functions. To ensure the merging always happens, 'noinline' is
  // attached to the function when compiling with -Oz.
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
  llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Fn = llvm::Function::Create(
      FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
  Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
  Fn->setDoesNotThrow();

  // Attach 'noinline' at -Oz.
  if (CGM.getCodeGenOpts().OptimizeSize == 2)
    Fn->addFnAttr(llvm::Attribute::NoInline);

  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);

  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  CharUnits Offset;
  Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
                  BufferAlignment);
  Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
  Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));

  unsigned I = 1;
  for (const auto &Item : Layout.Items) {
    Builder.CreateStore(
        Builder.getInt8(Item.getDescriptorByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
    Builder.CreateStore(
        Builder.getInt8(Item.getSizeByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));

    CharUnits Size = Item.size();
    if (!Size.getQuantity())
      continue;

    Address Arg = GetAddrOfLocalVar(Args[I]);
    Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
    Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
                                 "argDataCast");
    Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
    Offset += Size;
    ++I;
  }

  FinishFunction();

  return Fn;
}

RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
  assert(E.getNumArgs() >= 2 &&
         "__builtin_os_log_format takes at least 2 arguments");
  ASTContext &Ctx = getContext();
  analyze_os_log::OSLogBufferLayout Layout;
  analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
  Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
  llvm::SmallVector<llvm::Value *, 4> RetainableOperands;

  // Ignore argument 1, the format string. It is not currently used.
  CallArgList Args;
  Args.add(RValue::get(BufAddr.getPointer()), Ctx.VoidPtrTy);

  for (const auto &Item : Layout.Items) {
    int Size = Item.getSizeByte();
    if (!Size)
      continue;

    llvm::Value *ArgVal;

    if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
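      // Pack the mask string into a little-endian immediate: byte I of the
      // mask lands at bit position I * 8 of the 64-bit constant.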
      uint64_t Val = 0;
      for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
        Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
      ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
    } else if (const Expr *TheExpr = Item.getExpr()) {
      ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);

      // If a temporary object that requires destruction after the full
      // expression is passed, push a lifetime-extended cleanup to extend its
      // lifetime to the end of the enclosing block scope.
      auto LifetimeExtendObject = [&](const Expr *E) {
        E = E->IgnoreParenCasts();
        // Extend lifetimes of objects returned by function calls and message
        // sends.

        // FIXME: We should do this in other cases in which temporaries are
        //        created including arguments of non-ARC types (e.g., C++
        //        temporaries).
        if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
          return true;
        return false;
      };

      if (TheExpr->getType()->isObjCRetainableType() &&
          getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
        assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
               "Only scalars can be ObjC retainable types");
        if (!isa<Constant>(ArgVal)) {
          CleanupKind Cleanup = getARCCleanupKind();
          QualType Ty = TheExpr->getType();
          Address Alloca = Address::invalid();
          Address Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
          ArgVal = EmitARCRetain(Ty, ArgVal);
          Builder.CreateStore(ArgVal, Addr);
          pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
                                      CodeGenFunction::destroyARCStrongPrecise,
                                      Cleanup & EHCleanup);

          // Push a clang.arc.use call to ensure the ARC optimizer knows that
          // the argument must stay alive.
          if (CGM.getCodeGenOpts().OptimizationLevel != 0)
            pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
        }
      }
    } else {
      ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
    }

    unsigned ArgValSize =
        CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
    llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
                                                     ArgValSize);
    ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
    CanQualType ArgTy = getOSLogArgType(Ctx, Size);
    // If ArgVal has type x86_fp80, zero-extend ArgVal.
    ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
    Args.add(RValue::get(ArgVal), ArgTy);
  }

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
  llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
      Layout, BufAddr.getAlignment());
  EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
  return RValue::get(BufAddr.getPointer());
}

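/// Determine if a binop is an unsigned multiply-with-overflow builtin whose
/// operands are both unsigned but whose result is signed, which we can
/// specialize.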
static bool isSpecialUnsignedMultiplySignedResult(
    unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
    WidthAndSignedness ResultInfo) {
  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
         Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
         !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
}

static RValue EmitCheckedUnsignedMultiplySignedResult(
    CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
    const clang::Expr *Op2, WidthAndSignedness Op2Info,
    const clang::Expr *ResultArg, QualType ResultQTy,
    WidthAndSignedness ResultInfo) {
  assert(isSpecialUnsignedMultiplySignedResult(
             Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
         "Cannot specialize this multiply");

  llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
  llvm::Value *V2 = CGF.EmitScalarExpr(Op2);

  llvm::Value *HasOverflow;
  llvm::Value *Result = EmitOverflowIntrinsic(
      CGF, llvm::Intrinsic::umul_with_overflow, V1, V2, HasOverflow);

  // The intrinsic call flags overflow only when the product exceeds UINT_MAX.
  // However, since the original builtin has a signed result, we must also
  // report an overflow when the result is greater than INT_MAX.
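  // For example, with 32-bit operands and a signed 32-bit result, a product of
  // 0x80000000 fits in the unsigned range (so the intrinsic reports no carry)
  // but still overflows the signed result and must be flagged here.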
  auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
  llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);

  llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
  HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);

  bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
                          isVolatile);
  return RValue::get(HasOverflow);
}

/// Determine if a binop is a checked mixed-sign multiply we can specialize.
static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
                                       WidthAndSignedness Op1Info,
                                       WidthAndSignedness Op2Info,
                                       WidthAndSignedness ResultInfo) {
  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
         std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
         Op1Info.Signed != Op2Info.Signed;
}

/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
/// the generic checked-binop irgen.
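///
/// As a sketch: for 'bool __builtin_mul_overflow(int64_t a, uint64_t b,
/// int64_t *res)' we take |a|, perform a single llvm.umul.with.overflow on
/// the unsigned values, additionally flag overflow when the product exceeds
/// INT64_MAX + (a < 0), and negate the product when a was negative.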
static RValue
EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
                             WidthAndSignedness Op1Info, const clang::Expr *Op2,
                             WidthAndSignedness Op2Info,
                             const clang::Expr *ResultArg, QualType ResultQTy,
                             WidthAndSignedness ResultInfo) {
  assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
                                    Op2Info, ResultInfo) &&
         "Not a mixed-sign multiplication we can specialize");

  // Emit the signed and unsigned operands.
  const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
  const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
  llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
  llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
  unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
  unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;

  // One of the operands may be smaller than the other. If so, [s|z]ext it.
  if (SignedOpWidth < UnsignedOpWidth)
    Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
  if (UnsignedOpWidth < SignedOpWidth)
    Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");

  llvm::Type *OpTy = Signed->getType();
  llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
  llvm::Type *ResTy = ResultPtr.getElementType();
  unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);

  // Take the absolute value of the signed operand.
  llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
  llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
  llvm::Value *AbsSigned =
      CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);

  // Perform a checked unsigned multiplication.
  llvm::Value *UnsignedOverflow;
  llvm::Value *UnsignedResult =
      EmitOverflowIntrinsic(CGF, llvm::Intrinsic::umul_with_overflow, AbsSigned,
                            Unsigned, UnsignedOverflow);

  llvm::Value *Overflow, *Result;
  if (ResultInfo.Signed) {
    // Signed overflow occurs if the result is greater than INT_MAX or less
    // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
    auto IntMax =
        llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
    llvm::Value *MaxResult =
        CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
                              CGF.Builder.CreateZExt(IsNegative, OpTy));
    llvm::Value *SignedOverflow =
        CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);

    // Prepare the signed result (possibly by negating it).
    llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
    llvm::Value *SignedResult =
        CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
    Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
  } else {
    // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
    llvm::Value *Underflow = CGF.Builder.CreateAnd(
        IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
    if (ResultInfo.Width < OpWidth) {
      auto IntMax =
          llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
      llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
          UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
      Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
    }

    // Negate the product if it would be negative in infinite precision.
    Result = CGF.Builder.CreateSelect(
        IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);

    Result = CGF.Builder.CreateTrunc(Result, ResTy);
  }
  assert(Overflow && Result && "Missing overflow or result");

  bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
                          isVolatile);
  return RValue::get(Overflow);
}

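// Recursively print a record for __builtin_dump_struct, emitting one
// printf-style call per field and accumulating the printf return values.
// As a rough sketch, a top-level call (Lvl == 0, so Pad is empty) on
// 'struct S { int x; }' with x == 1 prints:
//   S {
//   int x : 1
//   }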
static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
                               Value *&RecordPtr, CharUnits Align,
                               llvm::FunctionCallee Func, int Lvl) {
  ASTContext &Context = CGF.getContext();
  RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
  std::string Pad = std::string(Lvl * 4, ' ');

  Value *GString =
      CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
  Value *Res = CGF.Builder.CreateCall(Func, {GString});

  static llvm::DenseMap<QualType, const char *> Types;
  if (Types.empty()) {
    Types[Context.CharTy] = "%c";
    Types[Context.BoolTy] = "%d";
    Types[Context.SignedCharTy] = "%hhd";
    Types[Context.UnsignedCharTy] = "%hhu";
    Types[Context.IntTy] = "%d";
    Types[Context.UnsignedIntTy] = "%u";
    Types[Context.LongTy] = "%ld";
    Types[Context.UnsignedLongTy] = "%lu";
    Types[Context.LongLongTy] = "%lld";
    Types[Context.UnsignedLongLongTy] = "%llu";
    Types[Context.ShortTy] = "%hd";
    Types[Context.UnsignedShortTy] = "%hu";
    Types[Context.VoidPtrTy] = "%p";
    Types[Context.FloatTy] = "%f";
    Types[Context.DoubleTy] = "%f";
    Types[Context.LongDoubleTy] = "%Lf";
    Types[Context.getPointerType(Context.CharTy)] = "%s";
    Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
  }

  for (const auto *FD : RD->fields()) {
    Value *FieldPtr = RecordPtr;
    if (RD->isUnion())
      FieldPtr = CGF.Builder.CreatePointerCast(
          FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
    else
      FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
                                             FD->getFieldIndex());

    GString = CGF.Builder.CreateGlobalStringPtr(
        llvm::Twine(Pad)
            .concat(FD->getType().getAsString())
            .concat(llvm::Twine(' '))
            .concat(FD->getNameAsString())
            .concat(" : ")
            .str());
    Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
    Res = CGF.Builder.CreateAdd(Res, TmpRes);

    QualType CanonicalType =
        FD->getType().getUnqualifiedType().getCanonicalType();

    // If the field is itself a record type, dump it recursively.
    if (CanonicalType->isRecordType()) {
      TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
      Res = CGF.Builder.CreateAdd(TmpRes, Res);
      continue;
    }

    // Pick the best printf format for the current field, falling back to %p.
    llvm::Twine Format = Types.find(CanonicalType) == Types.end()
                             ? Types[Context.VoidPtrTy]
                             : Types[CanonicalType];

    Address FieldAddress = Address(FieldPtr, Align);
    FieldPtr = CGF.Builder.CreateLoad(FieldAddress);

    // FIXME: Need to handle bitfields here.
    GString = CGF.Builder.CreateGlobalStringPtr(
        Format.concat(llvm::Twine('\n')).str());
    TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
    Res = CGF.Builder.CreateAdd(Res, TmpRes);
  }

  GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
  Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
  Res = CGF.Builder.CreateAdd(Res, TmpRes);
  return Res;
}

static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
                              llvm::SmallPtrSetImpl<const Decl *> &Seen) {
  if (const auto *Arr = Ctx.getAsArrayType(Ty))
    Ty = Ctx.getBaseElementType(Arr);

  const auto *Record = Ty->getAsCXXRecordDecl();
  if (!Record)
    return false;

  // We've already checked this type, or are in the process of checking it.
  if (!Seen.insert(Record).second)
    return false;

  assert(Record->hasDefinition() &&
         "Incomplete types should already be diagnosed");

  if (Record->isDynamicClass())
    return true;

  for (FieldDecl *F : Record->fields()) {
    if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
      return true;
  }
  return false;
}

/// Determine if the specified type requires laundering by checking if it is a
/// dynamic class type or contains a subobject which is a dynamic class type.
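///
/// For example, 'struct A { virtual void f(); };' is a dynamic class and
/// requires laundering, as does any record containing an A subobject as a
/// field (possibly nested in arrays); a plain aggregate of scalars does not.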
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
  if (!CGM.getCodeGenOpts().StrictVTablePointers)
    return false;
  llvm::SmallPtrSet<const Decl *, 16> Seen;
  return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
}

RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
  llvm::Value *Src = EmitScalarExpr(E->getArg(0));
  llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));

  // The builtin's shift arg may have a different type than the source arg and
  // result, but the LLVM intrinsic uses the same type for all values.
  llvm::Type *Ty = Src->getType();
  ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);

  // Rotate is a special case of LLVM funnel shift where the first two
  // arguments are the same.
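  // E.g. a 32-bit rotate-left by N lowers to @llvm.fshl.i32(Src, Src, N).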
  unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *F = CGM.getIntrinsic(IID, Ty);
  return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
}

// Map long-double math builtins to their f128 equivalents.
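// For example, Builtin::BI__builtin_sqrtl maps to Builtin::BI__builtin_sqrtf128;
// builtin IDs without an f128 counterpart are returned unchanged.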
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
#define MUTATE_LDBL(func) \
  case Builtin::BI__builtin_##func##l: \
    return Builtin::BI__builtin_##func##f128;
  MUTATE_LDBL(sqrt)
  MUTATE_LDBL(cbrt)
  MUTATE_LDBL(fabs)
  MUTATE_LDBL(log)
  MUTATE_LDBL(log2)
  MUTATE_LDBL(log10)
  MUTATE_LDBL(log1p)
  MUTATE_LDBL(logb)
  MUTATE_LDBL(exp)
  MUTATE_LDBL(exp2)
  MUTATE_LDBL(expm1)
  MUTATE_LDBL(fdim)
  MUTATE_LDBL(hypot)
  MUTATE_LDBL(ilogb)
  MUTATE_LDBL(pow)
  MUTATE_LDBL(fmin)
  MUTATE_LDBL(fmax)
  MUTATE_LDBL(ceil)
  MUTATE_LDBL(trunc)
  MUTATE_LDBL(rint)
  MUTATE_LDBL(nearbyint)
  MUTATE_LDBL(round)
  MUTATE_LDBL(floor)
  MUTATE_LDBL(lround)
  MUTATE_LDBL(llround)
  MUTATE_LDBL(lrint)
  MUTATE_LDBL(llrint)
  MUTATE_LDBL(fmod)
  MUTATE_LDBL(modf)
  MUTATE_LDBL(nan)
  MUTATE_LDBL(nans)
  MUTATE_LDBL(inf)
  MUTATE_LDBL(fma)
  MUTATE_LDBL(sin)
  MUTATE_LDBL(cos)
  MUTATE_LDBL(tan)
  MUTATE_LDBL(sinh)
  MUTATE_LDBL(cosh)
  MUTATE_LDBL(tanh)
  MUTATE_LDBL(asin)
  MUTATE_LDBL(acos)
  MUTATE_LDBL(atan)
  MUTATE_LDBL(asinh)
  MUTATE_LDBL(acosh)
  MUTATE_LDBL(atanh)
  MUTATE_LDBL(atan2)
  MUTATE_LDBL(erf)
  MUTATE_LDBL(erfc)
  MUTATE_LDBL(ldexp)
  MUTATE_LDBL(frexp)
  MUTATE_LDBL(huge_val)
  MUTATE_LDBL(copysign)
  MUTATE_LDBL(nextafter)
  MUTATE_LDBL(nexttoward)
  MUTATE_LDBL(remainder)
  MUTATE_LDBL(remquo)
  MUTATE_LDBL(scalbln)
  MUTATE_LDBL(scalbn)
  MUTATE_LDBL(tgamma)
  MUTATE_LDBL(lgamma)
#undef MUTATE_LDBL
  default:
    return BuiltinID;
  }
}

RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                                        const CallExpr *E,
                                        ReturnValueSlot ReturnValue) {
  const FunctionDecl *FD = GD.getDecl()->getAsFunction();
  // See if we can constant fold this builtin.  If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  // If the current long-double semantics are IEEE 128-bit, replace long-double
  // math builtins with their f128 equivalents.
  // TODO: This mutation should also be applied to targets other than PPC once
  // the backend supports IEEE 128-bit style libcalls.
  if (getTarget().getTriple().isPPC64() &&
      &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
    BuiltinID = mutateLongDoubleBuiltin(BuiltinID);

  // If the builtin has been declared explicitly with an assembler label,
  // disable the specialized emitting below. Ideally we should communicate the
  // rename in IR, or at least avoid generating the intrinsic calls that are
  // likely to get lowered to the renamed library functions.
  const unsigned BuiltinIDIfNoAsmLabel =
      FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;

  // There are LLVM math intrinsics/instructions corresponding to math library
  // functions, except that the LLVM op never sets errno while the math library
  // might. Math builtins otherwise have the same semantics as their math
  // library twins. Thus, we can transform math library and builtin calls to
  // their LLVM counterparts if the call is marked 'const' (known to never set
  // errno).
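  // For example, a 'const' call to sqrtf(x) becomes a single call to
  // @llvm.sqrt.f32 (or @llvm.experimental.constrained.sqrt.f32 under strict
  // floating point), with no errno side effect.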
  if (FD->hasAttr<ConstAttr>()) {
    switch (BuiltinIDIfNoAsmLabel) {
    case Builtin::BIceil:
    case Builtin::BIceilf:
    case Builtin::BIceill:
    case Builtin::BI__builtin_ceil:
    case Builtin::BI__builtin_ceilf:
    case Builtin::BI__builtin_ceilf16:
    case Builtin::BI__builtin_ceill:
    case Builtin::BI__builtin_ceilf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::ceil,
                                   Intrinsic::experimental_constrained_ceil));

    case Builtin::BIcopysign:
    case Builtin::BIcopysignf:
    case Builtin::BIcopysignl:
    case Builtin::BI__builtin_copysign:
    case Builtin::BI__builtin_copysignf:
    case Builtin::BI__builtin_copysignf16:
    case Builtin::BI__builtin_copysignl:
    case Builtin::BI__builtin_copysignf128:
      return RValue::get(emitBinaryBuiltin(*this, E, Intrinsic::copysign));

    case Builtin::BIcos:
    case Builtin::BIcosf:
    case Builtin::BIcosl:
    case Builtin::BI__builtin_cos:
    case Builtin::BI__builtin_cosf:
    case Builtin::BI__builtin_cosf16:
    case Builtin::BI__builtin_cosl:
    case Builtin::BI__builtin_cosf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::cos,
                                   Intrinsic::experimental_constrained_cos));

    case Builtin::BIexp:
    case Builtin::BIexpf:
    case Builtin::BIexpl:
    case Builtin::BI__builtin_exp:
    case Builtin::BI__builtin_expf:
    case Builtin::BI__builtin_expf16:
    case Builtin::BI__builtin_expl:
    case Builtin::BI__builtin_expf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::exp,
                                   Intrinsic::experimental_constrained_exp));

    case Builtin::BIexp2:
    case Builtin::BIexp2f:
    case Builtin::BIexp2l:
    case Builtin::BI__builtin_exp2:
    case Builtin::BI__builtin_exp2f:
    case Builtin::BI__builtin_exp2f16:
    case Builtin::BI__builtin_exp2l:
    case Builtin::BI__builtin_exp2f128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::exp2,
                                   Intrinsic::experimental_constrained_exp2));

    case Builtin::BIfabs:
    case Builtin::BIfabsf:
    case Builtin::BIfabsl:
    case Builtin::BI__builtin_fabs:
    case Builtin::BI__builtin_fabsf:
    case Builtin::BI__builtin_fabsf16:
    case Builtin::BI__builtin_fabsl:
    case Builtin::BI__builtin_fabsf128:
      return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::fabs));

    case Builtin::BIfloor:
    case Builtin::BIfloorf:
    case Builtin::BIfloorl:
    case Builtin::BI__builtin_floor:
    case Builtin::BI__builtin_floorf:
    case Builtin::BI__builtin_floorf16:
    case Builtin::BI__builtin_floorl:
    case Builtin::BI__builtin_floorf128:
      return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::floor,
                                   Intrinsic::experimental_constrained_floor));

    case Builtin::BIfma:
    case Builtin::BIfmaf:
    case Builtin::BIfmal:
    case Builtin::BI__builtin_fma:
    case Builtin::BI__builtin_fmaf:
    case Builtin::BI__builtin_fmaf16:
    case Builtin::BI__builtin_fmal:
    case Builtin::BI__builtin_fmaf128:
      return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::fma,
                                   Intrinsic::experimental_constrained_fma));

    case Builtin::BIfmax:
    case Builtin::BIfmaxf:
    case Builtin::BIfmaxl:
    case Builtin::BI__builtin_fmax:
    case Builtin::BI__builtin_fmaxf:
    case Builtin::BI__builtin_fmaxf16:
    case Builtin::BI__builtin_fmaxl:
    case Builtin::BI__builtin_fmaxf128:
      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::maxnum,
                                   Intrinsic::experimental_constrained_maxnum));

    case Builtin::BIfmin:
    case Builtin::BIfminf:
    case Builtin::BIfminl:
    case Builtin::BI__builtin_fmin:
    case Builtin::BI__builtin_fminf:
    case Builtin::BI__builtin_fminf16:
    case Builtin::BI__builtin_fminl:
    case Builtin::BI__builtin_fminf128:
      return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
                                   Intrinsic::minnum,
                                   Intrinsic::experimental_constrained_minnum));

    // fmod() is a special case: it maps to the frem instruction rather than an
    // LLVM intrinsic.
2382     case Builtin::BIfmod:
2383     case Builtin::BIfmodf:
2384     case Builtin::BIfmodl:
2385     case Builtin::BI__builtin_fmod:
2386     case Builtin::BI__builtin_fmodf:
2387     case Builtin::BI__builtin_fmodf16:
2388     case Builtin::BI__builtin_fmodl:
2389     case Builtin::BI__builtin_fmodf128: {
2390       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2391       Value *Arg1 = EmitScalarExpr(E->getArg(0));
2392       Value *Arg2 = EmitScalarExpr(E->getArg(1));
2393       return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2394     }
2395 
2396     case Builtin::BIlog:
2397     case Builtin::BIlogf:
2398     case Builtin::BIlogl:
2399     case Builtin::BI__builtin_log:
2400     case Builtin::BI__builtin_logf:
2401     case Builtin::BI__builtin_logf16:
2402     case Builtin::BI__builtin_logl:
2403     case Builtin::BI__builtin_logf128:
2404       return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2405                                    Intrinsic::log,
2406                                    Intrinsic::experimental_constrained_log));
2407 
2408     case Builtin::BIlog10:
2409     case Builtin::BIlog10f:
2410     case Builtin::BIlog10l:
2411     case Builtin::BI__builtin_log10:
2412     case Builtin::BI__builtin_log10f:
2413     case Builtin::BI__builtin_log10f16:
2414     case Builtin::BI__builtin_log10l:
2415     case Builtin::BI__builtin_log10f128:
2416       return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2417                                    Intrinsic::log10,
2418                                    Intrinsic::experimental_constrained_log10));
2419 
2420     case Builtin::BIlog2:
2421     case Builtin::BIlog2f:
2422     case Builtin::BIlog2l:
2423     case Builtin::BI__builtin_log2:
2424     case Builtin::BI__builtin_log2f:
2425     case Builtin::BI__builtin_log2f16:
2426     case Builtin::BI__builtin_log2l:
2427     case Builtin::BI__builtin_log2f128:
2428       return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2429                                    Intrinsic::log2,
2430                                    Intrinsic::experimental_constrained_log2));
2431 
2432     case Builtin::BInearbyint:
2433     case Builtin::BInearbyintf:
2434     case Builtin::BInearbyintl:
2435     case Builtin::BI__builtin_nearbyint:
2436     case Builtin::BI__builtin_nearbyintf:
2437     case Builtin::BI__builtin_nearbyintl:
2438     case Builtin::BI__builtin_nearbyintf128:
2439       return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2440                                 Intrinsic::nearbyint,
2441                                 Intrinsic::experimental_constrained_nearbyint));
2442 
2443     case Builtin::BIpow:
2444     case Builtin::BIpowf:
2445     case Builtin::BIpowl:
2446     case Builtin::BI__builtin_pow:
2447     case Builtin::BI__builtin_powf:
2448     case Builtin::BI__builtin_powf16:
2449     case Builtin::BI__builtin_powl:
2450     case Builtin::BI__builtin_powf128:
2451       return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2452                                    Intrinsic::pow,
2453                                    Intrinsic::experimental_constrained_pow));
2454 
2455     case Builtin::BIrint:
2456     case Builtin::BIrintf:
2457     case Builtin::BIrintl:
2458     case Builtin::BI__builtin_rint:
2459     case Builtin::BI__builtin_rintf:
2460     case Builtin::BI__builtin_rintf16:
2461     case Builtin::BI__builtin_rintl:
2462     case Builtin::BI__builtin_rintf128:
2463       return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2464                                    Intrinsic::rint,
2465                                    Intrinsic::experimental_constrained_rint));
2466 
2467     case Builtin::BIround:
2468     case Builtin::BIroundf:
2469     case Builtin::BIroundl:
2470     case Builtin::BI__builtin_round:
2471     case Builtin::BI__builtin_roundf:
2472     case Builtin::BI__builtin_roundf16:
2473     case Builtin::BI__builtin_roundl:
2474     case Builtin::BI__builtin_roundf128:
2475       return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2476                                    Intrinsic::round,
2477                                    Intrinsic::experimental_constrained_round));
2478 
2479     case Builtin::BIsin:
2480     case Builtin::BIsinf:
2481     case Builtin::BIsinl:
2482     case Builtin::BI__builtin_sin:
2483     case Builtin::BI__builtin_sinf:
2484     case Builtin::BI__builtin_sinf16:
2485     case Builtin::BI__builtin_sinl:
2486     case Builtin::BI__builtin_sinf128:
2487       return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2488                                    Intrinsic::sin,
2489                                    Intrinsic::experimental_constrained_sin));
2490 
2491     case Builtin::BIsqrt:
2492     case Builtin::BIsqrtf:
2493     case Builtin::BIsqrtl:
2494     case Builtin::BI__builtin_sqrt:
2495     case Builtin::BI__builtin_sqrtf:
2496     case Builtin::BI__builtin_sqrtf16:
2497     case Builtin::BI__builtin_sqrtl:
2498     case Builtin::BI__builtin_sqrtf128:
2499       return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2500                                    Intrinsic::sqrt,
2501                                    Intrinsic::experimental_constrained_sqrt));
2502 
2503     case Builtin::BItrunc:
2504     case Builtin::BItruncf:
2505     case Builtin::BItruncl:
2506     case Builtin::BI__builtin_trunc:
2507     case Builtin::BI__builtin_truncf:
2508     case Builtin::BI__builtin_truncf16:
2509     case Builtin::BI__builtin_truncl:
2510     case Builtin::BI__builtin_truncf128:
2511       return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2512                                    Intrinsic::trunc,
2513                                    Intrinsic::experimental_constrained_trunc));
2514 
2515     case Builtin::BIlround:
2516     case Builtin::BIlroundf:
2517     case Builtin::BIlroundl:
2518     case Builtin::BI__builtin_lround:
2519     case Builtin::BI__builtin_lroundf:
2520     case Builtin::BI__builtin_lroundl:
2521     case Builtin::BI__builtin_lroundf128:
2522       return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2523           *this, E, Intrinsic::lround,
2524           Intrinsic::experimental_constrained_lround));
2525 
2526     case Builtin::BIllround:
2527     case Builtin::BIllroundf:
2528     case Builtin::BIllroundl:
2529     case Builtin::BI__builtin_llround:
2530     case Builtin::BI__builtin_llroundf:
2531     case Builtin::BI__builtin_llroundl:
2532     case Builtin::BI__builtin_llroundf128:
2533       return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2534           *this, E, Intrinsic::llround,
2535           Intrinsic::experimental_constrained_llround));
2536 
2537     case Builtin::BIlrint:
2538     case Builtin::BIlrintf:
2539     case Builtin::BIlrintl:
2540     case Builtin::BI__builtin_lrint:
2541     case Builtin::BI__builtin_lrintf:
2542     case Builtin::BI__builtin_lrintl:
2543     case Builtin::BI__builtin_lrintf128:
2544       return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2545           *this, E, Intrinsic::lrint,
2546           Intrinsic::experimental_constrained_lrint));
2547 
2548     case Builtin::BIllrint:
2549     case Builtin::BIllrintf:
2550     case Builtin::BIllrintl:
2551     case Builtin::BI__builtin_llrint:
2552     case Builtin::BI__builtin_llrintf:
2553     case Builtin::BI__builtin_llrintl:
2554     case Builtin::BI__builtin_llrintf128:
2555       return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
2556           *this, E, Intrinsic::llrint,
2557           Intrinsic::experimental_constrained_llrint));
2558 
2559     default:
2560       break;
2561     }
2562   }
2563 
2564   switch (BuiltinIDIfNoAsmLabel) {
2565   default: break;
2566   case Builtin::BI__builtin___CFStringMakeConstantString:
2567   case Builtin::BI__builtin___NSStringMakeConstantString:
2568     return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
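  // For the MS __va_start builtin the first argument is already a pointer,
  // so it is emitted as a scalar; the GCC-style builtins take the va_list
  // itself, so we take its address. The boolean argument below selects
  // llvm.va_start vs. llvm.va_end.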
2569   case Builtin::BI__builtin_stdarg_start:
2570   case Builtin::BI__builtin_va_start:
2571   case Builtin::BI__va_start:
2572   case Builtin::BI__builtin_va_end:
2573     return RValue::get(
2574         EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
2575                            ? EmitScalarExpr(E->getArg(0))
2576                            : EmitVAListRef(E->getArg(0)).getPointer(),
2577                        BuiltinID != Builtin::BI__builtin_va_end));
2578   case Builtin::BI__builtin_va_copy: {
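    // Both va_lists are passed as i8* to the llvm.va_copy intrinsic, e.g.:
    //   call void @llvm.va_copy(i8* %dst, i8* %src)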
2579     Value *DstPtr = EmitVAListRef(E->getArg(0)).getPointer();
2580     Value *SrcPtr = EmitVAListRef(E->getArg(1)).getPointer();
2581 
2582     llvm::Type *Type = Int8PtrTy;
2583 
2584     DstPtr = Builder.CreateBitCast(DstPtr, Type);
2585     SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
2586     return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy),
2587                                           {DstPtr, SrcPtr}));
2588   }
2589   case Builtin::BI__builtin_abs:
2590   case Builtin::BI__builtin_labs:
2591   case Builtin::BI__builtin_llabs: {
2592     // X < 0 ? -X : X
2593     // The negation has 'nsw' because abs of INT_MIN is undefined.
2594     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2595     Value *NegOp = Builder.CreateNSWNeg(ArgValue, "neg");
2596     Constant *Zero = llvm::Constant::getNullValue(ArgValue->getType());
2597     Value *CmpResult = Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2598     Value *Result = Builder.CreateSelect(CmpResult, NegOp, ArgValue, "abs");
2599     return RValue::get(Result);
2600   }
2601   case Builtin::BI__builtin_complex: {
2602     Value *Real = EmitScalarExpr(E->getArg(0));
2603     Value *Imag = EmitScalarExpr(E->getArg(1));
2604     return RValue::getComplex({Real, Imag});
2605   }
2606   case Builtin::BI__builtin_conj:
2607   case Builtin::BI__builtin_conjf:
2608   case Builtin::BI__builtin_conjl:
2609   case Builtin::BIconj:
2610   case Builtin::BIconjf:
2611   case Builtin::BIconjl: {
2612     ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2613     Value *Real = ComplexVal.first;
2614     Value *Imag = ComplexVal.second;
2615     Imag = Builder.CreateFNeg(Imag, "neg");
2616     return RValue::getComplex(std::make_pair(Real, Imag));
2617   }
2618   case Builtin::BI__builtin_creal:
2619   case Builtin::BI__builtin_crealf:
2620   case Builtin::BI__builtin_creall:
2621   case Builtin::BIcreal:
2622   case Builtin::BIcrealf:
2623   case Builtin::BIcreall: {
2624     ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2625     return RValue::get(ComplexVal.first);
2626   }
2627 
2628   case Builtin::BI__builtin_dump_struct: {
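    // __builtin_dump_struct(&S, &printf): arg 0 is a pointer to the record
    // to dump and arg 1 a printf-style callback, modeled below as the
    // variadic type int (i8 *, ...).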
2629     llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
2630     llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
2631         LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
2632 
2633     Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
2634     CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
2635 
2636     const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
2637     QualType Arg0Type = Arg0->getType()->getPointeeType();
2638 
2639     Value *RecordPtr = EmitScalarExpr(Arg0);
2640     Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
2641                             {LLVMFuncType, Func}, 0);
2642     return RValue::get(Res);
2643   }
2644 
2645   case Builtin::BI__builtin_preserve_access_index: {
    // Only enable the preserved access index region when debug info is
    // available, since debug info is needed to preserve the user-level
    // access pattern.
2649     if (!getDebugInfo()) {
2650       CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
2651       return RValue::get(EmitScalarExpr(E->getArg(0)));
2652     }
2653 
2654     // Nested builtin_preserve_access_index() not supported
2655     if (IsInPreservedAIRegion) {
2656       CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
2657       return RValue::get(EmitScalarExpr(E->getArg(0)));
2658     }
2659 
2660     IsInPreservedAIRegion = true;
2661     Value *Res = EmitScalarExpr(E->getArg(0));
2662     IsInPreservedAIRegion = false;
2663     return RValue::get(Res);
2664   }
2665 
2666   case Builtin::BI__builtin_cimag:
2667   case Builtin::BI__builtin_cimagf:
2668   case Builtin::BI__builtin_cimagl:
2669   case Builtin::BIcimag:
2670   case Builtin::BIcimagf:
2671   case Builtin::BIcimagl: {
2672     ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2673     return RValue::get(ComplexVal.second);
2674   }
2675 
2676   case Builtin::BI__builtin_clrsb:
2677   case Builtin::BI__builtin_clrsbl:
2678   case Builtin::BI__builtin_clrsbll: {
    // clrsb(x) -> clz(x < 0 ? ~x : x) - 1, i.e. the number of bits following
    // the most significant bit that are identical to it.
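    // E.g. for i32, clrsb(0) and clrsb(-1) both yield ctlz(0) - 1 == 31.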
2680     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2681 
2682     llvm::Type *ArgType = ArgValue->getType();
2683     Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2684 
2685     llvm::Type *ResultType = ConvertType(E->getType());
2686     Value *Zero = llvm::Constant::getNullValue(ArgType);
2687     Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
2688     Value *Inverse = Builder.CreateNot(ArgValue, "not");
2689     Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
2690     Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
2691     Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
2692     Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2693                                    "cast");
2694     return RValue::get(Result);
2695   }
2696   case Builtin::BI__builtin_ctzs:
2697   case Builtin::BI__builtin_ctz:
2698   case Builtin::BI__builtin_ctzl:
2699   case Builtin::BI__builtin_ctzll: {
2700     Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CTZPassedZero);
2701 
2702     llvm::Type *ArgType = ArgValue->getType();
2703     Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2704 
2705     llvm::Type *ResultType = ConvertType(E->getType());
2706     Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2707     Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2708     if (Result->getType() != ResultType)
2709       Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2710                                      "cast");
2711     return RValue::get(Result);
2712   }
2713   case Builtin::BI__builtin_clzs:
2714   case Builtin::BI__builtin_clz:
2715   case Builtin::BI__builtin_clzl:
2716   case Builtin::BI__builtin_clzll: {
2717     Value *ArgValue = EmitCheckedArgForBuiltin(E->getArg(0), BCK_CLZPassedZero);
2718 
2719     llvm::Type *ArgType = ArgValue->getType();
2720     Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2721 
2722     llvm::Type *ResultType = ConvertType(E->getType());
2723     Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
2724     Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
2725     if (Result->getType() != ResultType)
2726       Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2727                                      "cast");
2728     return RValue::get(Result);
2729   }
2730   case Builtin::BI__builtin_ffs:
2731   case Builtin::BI__builtin_ffsl:
2732   case Builtin::BI__builtin_ffsll: {
2733     // ffs(x) -> x ? cttz(x) + 1 : 0
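    // The result is the 1-based index of the least significant set bit,
    // e.g. ffs(8) == 4 and ffs(0) == 0.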
2734     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2735 
2736     llvm::Type *ArgType = ArgValue->getType();
2737     Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
2738 
2739     llvm::Type *ResultType = ConvertType(E->getType());
2740     Value *Tmp =
2741         Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
2742                           llvm::ConstantInt::get(ArgType, 1));
2743     Value *Zero = llvm::Constant::getNullValue(ArgType);
2744     Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
2745     Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
2746     if (Result->getType() != ResultType)
2747       Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2748                                      "cast");
2749     return RValue::get(Result);
2750   }
2751   case Builtin::BI__builtin_parity:
2752   case Builtin::BI__builtin_parityl:
2753   case Builtin::BI__builtin_parityll: {
2754     // parity(x) -> ctpop(x) & 1
2755     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2756 
2757     llvm::Type *ArgType = ArgValue->getType();
2758     Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2759 
2760     llvm::Type *ResultType = ConvertType(E->getType());
2761     Value *Tmp = Builder.CreateCall(F, ArgValue);
2762     Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
2763     if (Result->getType() != ResultType)
2764       Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2765                                      "cast");
2766     return RValue::get(Result);
2767   }
2768   case Builtin::BI__lzcnt16:
2769   case Builtin::BI__lzcnt:
2770   case Builtin::BI__lzcnt64: {
2771     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2772 
2773     llvm::Type *ArgType = ArgValue->getType();
2774     Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
2775 
2776     llvm::Type *ResultType = ConvertType(E->getType());
2777     Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2778     if (Result->getType() != ResultType)
2779       Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2780                                      "cast");
2781     return RValue::get(Result);
2782   }
2783   case Builtin::BI__popcnt16:
2784   case Builtin::BI__popcnt:
2785   case Builtin::BI__popcnt64:
2786   case Builtin::BI__builtin_popcount:
2787   case Builtin::BI__builtin_popcountl:
2788   case Builtin::BI__builtin_popcountll: {
2789     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2790 
2791     llvm::Type *ArgType = ArgValue->getType();
2792     Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
2793 
2794     llvm::Type *ResultType = ConvertType(E->getType());
2795     Value *Result = Builder.CreateCall(F, ArgValue);
2796     if (Result->getType() != ResultType)
2797       Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
2798                                      "cast");
2799     return RValue::get(Result);
2800   }
2801   case Builtin::BI__builtin_unpredictable: {
2802     // Always return the argument of __builtin_unpredictable. LLVM does not
2803     // handle this builtin. Metadata for this builtin should be added directly
2804     // to instructions such as branches or switches that use it.
2805     return RValue::get(EmitScalarExpr(E->getArg(0)));
2806   }
2807   case Builtin::BI__builtin_expect: {
2808     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2809     llvm::Type *ArgType = ArgValue->getType();
2810 
2811     Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2812     // Don't generate llvm.expect on -O0 as the backend won't use it for
2813     // anything.
2814     // Note, we still IRGen ExpectedValue because it could have side-effects.
2815     if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2816       return RValue::get(ArgValue);
2817 
2818     Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
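    // E.g. for a long argument this emits roughly:
    //   %expval = call i64 @llvm.expect.i64(i64 %arg, i64 %expected)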
2819     Value *Result =
2820         Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
2821     return RValue::get(Result);
2822   }
2823   case Builtin::BI__builtin_expect_with_probability: {
2824     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2825     llvm::Type *ArgType = ArgValue->getType();
2826 
2827     Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
2828     llvm::APFloat Probability(0.0);
2829     const Expr *ProbArg = E->getArg(2);
2830     bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
2831     assert(EvalSucceed && "probability should be able to evaluate as float");
2832     (void)EvalSucceed;
2833     bool LoseInfo = false;
2834     Probability.convert(llvm::APFloat::IEEEdouble(),
2835                         llvm::RoundingMode::Dynamic, &LoseInfo);
2836     llvm::Type *Ty = ConvertType(ProbArg->getType());
2837     Constant *Confidence = ConstantFP::get(Ty, Probability);
2838     // Don't generate llvm.expect.with.probability on -O0 as the backend
2839     // won't use it for anything.
2840     // Note, we still IRGen ExpectedValue because it could have side-effects.
2841     if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2842       return RValue::get(ArgValue);
2843 
2844     Function *FnExpect =
2845         CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
2846     Value *Result = Builder.CreateCall(
2847         FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
2848     return RValue::get(Result);
2849   }
2850   case Builtin::BI__builtin_assume_aligned: {
2851     const Expr *Ptr = E->getArg(0);
2852     Value *PtrValue = EmitScalarExpr(Ptr);
2853     Value *OffsetValue =
2854       (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
2855 
2856     Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
2857     ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
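    // Clamp the requested alignment to llvm::Value::MaximumAlignment, the
    // largest alignment LLVM can encode, so the assumption stays valid.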
2858     if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
2859       AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
2860                                      llvm::Value::MaximumAlignment);
2861 
2862     emitAlignmentAssumption(PtrValue, Ptr,
2863                             /*The expr loc is sufficient.*/ SourceLocation(),
2864                             AlignmentCI, OffsetValue);
2865     return RValue::get(PtrValue);
2866   }
2867   case Builtin::BI__assume:
2868   case Builtin::BI__builtin_assume: {
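    // An assumption is only a hint: if evaluating the condition would have
    // side effects, drop it entirely rather than emit those side effects.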
2869     if (E->getArg(0)->HasSideEffects(getContext()))
2870       return RValue::get(nullptr);
2871 
2872     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2873     Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
2874     return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
2875   }
2876   case Builtin::BI__arithmetic_fence: {
    // Create the builtin call if FastMath is selected and the target
    // supports the builtin; otherwise just return the argument.
2879     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2880     llvm::FastMathFlags FMF = Builder.getFastMathFlags();
2881     bool isArithmeticFenceEnabled =
2882         FMF.allowReassoc() &&
2883         getContext().getTargetInfo().checkArithmeticFenceSupported();
2884     QualType ArgType = E->getArg(0)->getType();
2885     if (ArgType->isComplexType()) {
2886       if (isArithmeticFenceEnabled) {
2887         QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
2888         ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2889         Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
2890                                                     ConvertType(ElementType));
2891         Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
2892                                                     ConvertType(ElementType));
2893         return RValue::getComplex(std::make_pair(Real, Imag));
2894       }
2895       ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
2896       Value *Real = ComplexVal.first;
2897       Value *Imag = ComplexVal.second;
2898       return RValue::getComplex(std::make_pair(Real, Imag));
2899     }
2900     Value *ArgValue = EmitScalarExpr(E->getArg(0));
2901     if (isArithmeticFenceEnabled)
2902       return RValue::get(
2903           Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
2904     return RValue::get(ArgValue);
2905   }
2906   case Builtin::BI__builtin_bswap16:
2907   case Builtin::BI__builtin_bswap32:
2908   case Builtin::BI__builtin_bswap64: {
2909     return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
2910   }
2911   case Builtin::BI__builtin_bitreverse8:
2912   case Builtin::BI__builtin_bitreverse16:
2913   case Builtin::BI__builtin_bitreverse32:
2914   case Builtin::BI__builtin_bitreverse64: {
2915     return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bitreverse));
2916   }
2917   case Builtin::BI__builtin_rotateleft8:
2918   case Builtin::BI__builtin_rotateleft16:
2919   case Builtin::BI__builtin_rotateleft32:
2920   case Builtin::BI__builtin_rotateleft64:
2921   case Builtin::BI_rotl8: // Microsoft variants of rotate left
2922   case Builtin::BI_rotl16:
2923   case Builtin::BI_rotl:
2924   case Builtin::BI_lrotl:
2925   case Builtin::BI_rotl64:
2926     return emitRotate(E, false);
2927 
2928   case Builtin::BI__builtin_rotateright8:
2929   case Builtin::BI__builtin_rotateright16:
2930   case Builtin::BI__builtin_rotateright32:
2931   case Builtin::BI__builtin_rotateright64:
2932   case Builtin::BI_rotr8: // Microsoft variants of rotate right
2933   case Builtin::BI_rotr16:
2934   case Builtin::BI_rotr:
2935   case Builtin::BI_lrotr:
2936   case Builtin::BI_rotr64:
2937     return emitRotate(E, true);
2938 
2939   case Builtin::BI__builtin_constant_p: {
2940     llvm::Type *ResultType = ConvertType(E->getType());
2941 
2942     const Expr *Arg = E->getArg(0);
2943     QualType ArgType = Arg->getType();
2944     // FIXME: The allowance for Obj-C pointers and block pointers is historical
2945     // and likely a mistake.
2946     if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
2947         !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
2948       // Per the GCC documentation, only numeric constants are recognized after
2949       // inlining.
2950       return RValue::get(ConstantInt::get(ResultType, 0));
2951 
2952     if (Arg->HasSideEffects(getContext()))
2953       // The argument is unevaluated, so be conservative if it might have
2954       // side-effects.
2955       return RValue::get(ConstantInt::get(ResultType, 0));
2956 
2957     Value *ArgValue = EmitScalarExpr(Arg);
2958     if (ArgType->isObjCObjectPointerType()) {
2959       // Convert Objective-C objects to id because we cannot distinguish between
2960       // LLVM types for Obj-C classes as they are opaque.
2961       ArgType = CGM.getContext().getObjCIdType();
2962       ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
2963     }
2964     Function *F =
2965         CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
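    // The optimizer folds llvm.is.constant to true if the argument ever
    // becomes a known constant; any call still left is lowered to false.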
2966     Value *Result = Builder.CreateCall(F, ArgValue);
2967     if (Result->getType() != ResultType)
2968       Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
2969     return RValue::get(Result);
2970   }
2971   case Builtin::BI__builtin_dynamic_object_size:
2972   case Builtin::BI__builtin_object_size: {
2973     unsigned Type =
2974         E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
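    // Per the GCC documentation, 'type' (0-3) selects between maximum and
    // minimum size estimates and between whole-object and subobject bounds.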
2975     auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
2976 
2977     // We pass this builtin onto the optimizer so that it can figure out the
2978     // object size in more complex cases.
2979     bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
2980     return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
2981                                              /*EmittedE=*/nullptr, IsDynamic));
2982   }
2983   case Builtin::BI__builtin_prefetch: {
2984     Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
2986     RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
2987       llvm::ConstantInt::get(Int32Ty, 0);
2988     Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
2989       llvm::ConstantInt::get(Int32Ty, 3);
2990     Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
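    // The last operand selects the cache: 1 requests a data prefetch, 0
    // would request an instruction prefetch.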
2991     Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
2992     return RValue::get(Builder.CreateCall(F, {Address, RW, Locality, Data}));
2993   }
2994   case Builtin::BI__builtin_readcyclecounter: {
2995     Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
2996     return RValue::get(Builder.CreateCall(F));
2997   }
2998   case Builtin::BI__builtin___clear_cache: {
2999     Value *Begin = EmitScalarExpr(E->getArg(0));
3000     Value *End = EmitScalarExpr(E->getArg(1));
3001     Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3002     return RValue::get(Builder.CreateCall(F, {Begin, End}));
3003   }
3004   case Builtin::BI__builtin_trap:
3005     return RValue::get(EmitTrapCall(Intrinsic::trap));
3006   case Builtin::BI__debugbreak:
3007     return RValue::get(EmitTrapCall(Intrinsic::debugtrap));
3008   case Builtin::BI__builtin_unreachable: {
3009     EmitUnreachable(E->getExprLoc());
3010 
3011     // We do need to preserve an insertion point.
3012     EmitBlock(createBasicBlock("unreachable.cont"));
3013 
3014     return RValue::get(nullptr);
3015   }
3016 
3017   case Builtin::BI__builtin_powi:
3018   case Builtin::BI__builtin_powif:
3019   case Builtin::BI__builtin_powil: {
3020     llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3021     llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3022 
3023     if (Builder.getIsFPConstrained()) {
3024       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3025       Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3026                                      Src0->getType());
3027       return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3028     }
3029 
3030     Function *F = CGM.getIntrinsic(Intrinsic::powi,
3031                                    { Src0->getType(), Src1->getType() });
3032     return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3033   }
3034   case Builtin::BI__builtin_isgreater:
3035   case Builtin::BI__builtin_isgreaterequal:
3036   case Builtin::BI__builtin_isless:
3037   case Builtin::BI__builtin_islessequal:
3038   case Builtin::BI__builtin_islessgreater:
3039   case Builtin::BI__builtin_isunordered: {
3040     // Ordered comparisons: we know the arguments to these are matching scalar
3041     // floating point values.
3042     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3043     // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3044     Value *LHS = EmitScalarExpr(E->getArg(0));
3045     Value *RHS = EmitScalarExpr(E->getArg(1));
3046 
3047     switch (BuiltinID) {
3048     default: llvm_unreachable("Unknown ordered comparison");
3049     case Builtin::BI__builtin_isgreater:
3050       LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3051       break;
3052     case Builtin::BI__builtin_isgreaterequal:
3053       LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3054       break;
3055     case Builtin::BI__builtin_isless:
3056       LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3057       break;
3058     case Builtin::BI__builtin_islessequal:
3059       LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3060       break;
3061     case Builtin::BI__builtin_islessgreater:
3062       LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3063       break;
3064     case Builtin::BI__builtin_isunordered:
3065       LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3066       break;
3067     }
3068     // ZExt bool to int type.
3069     return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3070   }
3071   case Builtin::BI__builtin_isnan: {
3072     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3073     Value *V = EmitScalarExpr(E->getArg(0));
3074     llvm::Type *Ty = V->getType();
3075     const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
3076     if (!Builder.getIsFPConstrained() ||
3077         Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
3078         !Ty->isIEEE()) {
3079       V = Builder.CreateFCmpUNO(V, V, "cmp");
3080       return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3081     }
3082 
3083     if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
3084       return RValue::get(Result);
3085 
    // NaN has all exp bits set and a non-zero significand. Therefore:
    // isnan(V) == ((exp mask - abs(V)) < 0), where abs(V) is the bit
    // pattern of V with the sign bit cleared.
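    // E.g. for float: exp mask == 0x7F800000 and abs(NaN) > 0x7F800000, so
    // the subtraction wraps negative and sets the sign bit.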
3088     unsigned bitsize = Ty->getScalarSizeInBits();
3089     llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
3090     Value *IntV = Builder.CreateBitCast(V, IntTy);
3091     APInt AndMask = APInt::getSignedMaxValue(bitsize);
3092     Value *AbsV =
3093         Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask));
3094     APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
3095     Value *Sub =
3096         Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV);
3097     // V = sign bit (Sub) <=> V = (Sub < 0)
3098     V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1));
3099     if (bitsize > 32)
3100       V = Builder.CreateTrunc(V, ConvertType(E->getType()));
3101     return RValue::get(V);
3102   }
3103 
3104   case Builtin::BI__builtin_elementwise_abs: {
3105     Value *Op0 = EmitScalarExpr(E->getArg(0));
3106     Value *Result;
3107     if (Op0->getType()->isIntOrIntVectorTy())
3108       Result = Builder.CreateBinaryIntrinsic(
3109           llvm::Intrinsic::abs, Op0, Builder.getFalse(), nullptr, "elt.abs");
3110     else
3111       Result = Builder.CreateUnaryIntrinsic(llvm::Intrinsic::fabs, Op0, nullptr,
3112                                             "elt.abs");
3113     return RValue::get(Result);
3114   }
3115   case Builtin::BI__builtin_elementwise_max: {
3116     Value *Op0 = EmitScalarExpr(E->getArg(0));
3117     Value *Op1 = EmitScalarExpr(E->getArg(1));
3118     Value *Result;
3119     if (Op0->getType()->isIntOrIntVectorTy()) {
3120       QualType Ty = E->getArg(0)->getType();
3121       if (auto *VecTy = Ty->getAs<VectorType>())
3122         Ty = VecTy->getElementType();
3123       Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
3124                                                  ? llvm::Intrinsic::smax
3125                                                  : llvm::Intrinsic::umax,
3126                                              Op0, Op1, nullptr, "elt.max");
3127     } else
3128       Result = Builder.CreateMaxNum(Op0, Op1, "elt.max");
3129     return RValue::get(Result);
3130   }
3131   case Builtin::BI__builtin_elementwise_min: {
3132     Value *Op0 = EmitScalarExpr(E->getArg(0));
3133     Value *Op1 = EmitScalarExpr(E->getArg(1));
3134     Value *Result;
3135     if (Op0->getType()->isIntOrIntVectorTy()) {
3136       QualType Ty = E->getArg(0)->getType();
3137       if (auto *VecTy = Ty->getAs<VectorType>())
3138         Ty = VecTy->getElementType();
3139       Result = Builder.CreateBinaryIntrinsic(Ty->isSignedIntegerType()
3140                                                  ? llvm::Intrinsic::smin
3141                                                  : llvm::Intrinsic::umin,
3142                                              Op0, Op1, nullptr, "elt.min");
3143     } else
3144       Result = Builder.CreateMinNum(Op0, Op1, "elt.min");
3145     return RValue::get(Result);
3146   }
3147 
3148   case Builtin::BI__builtin_reduce_max: {
3149     auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
3150       if (IrTy->isIntOrIntVectorTy()) {
3151         if (auto *VecTy = QT->getAs<VectorType>())
3152           QT = VecTy->getElementType();
3153         if (QT->isSignedIntegerType())
3154           return llvm::Intrinsic::vector_reduce_smax;
3155         else
3156           return llvm::Intrinsic::vector_reduce_umax;
3157       }
3158       return llvm::Intrinsic::vector_reduce_fmax;
3159     };
3160     Value *Op0 = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreateUnaryIntrinsic(
        GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
        "rdx.max");
3164     return RValue::get(Result);
3165   }
3166 
3167   case Builtin::BI__builtin_reduce_min: {
3168     auto GetIntrinsicID = [](QualType QT, llvm::Type *IrTy) {
3169       if (IrTy->isIntOrIntVectorTy()) {
3170         if (auto *VecTy = QT->getAs<VectorType>())
3171           QT = VecTy->getElementType();
3172         if (QT->isSignedIntegerType())
3173           return llvm::Intrinsic::vector_reduce_smin;
3174         else
3175           return llvm::Intrinsic::vector_reduce_umin;
3176       }
3177       return llvm::Intrinsic::vector_reduce_fmin;
3178     };
3179     Value *Op0 = EmitScalarExpr(E->getArg(0));
3180     Value *Result = Builder.CreateUnaryIntrinsic(
3181         GetIntrinsicID(E->getArg(0)->getType(), Op0->getType()), Op0, nullptr,
3182         "rdx.min");
3183     return RValue::get(Result);
3184   }
3185 
3186   case Builtin::BI__builtin_matrix_transpose: {
3187     const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
3188     Value *MatValue = EmitScalarExpr(E->getArg(0));
3189     MatrixBuilder<CGBuilderTy> MB(Builder);
3190     Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
3191                                              MatrixTy->getNumColumns());
3192     return RValue::get(Result);
3193   }
3194 
3195   case Builtin::BI__builtin_matrix_column_major_load: {
3196     MatrixBuilder<CGBuilderTy> MB(Builder);
    // Emit everything that isn't dependent on the first parameter type.
3198     Value *Stride = EmitScalarExpr(E->getArg(3));
3199     const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
3200     auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
3201     assert(PtrTy && "arg0 must be of pointer type");
3202     bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3203 
3204     Address Src = EmitPointerWithAlignment(E->getArg(0));
3205     EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
3206                         E->getArg(0)->getExprLoc(), FD, 0);
3207     Value *Result = MB.CreateColumnMajorLoad(
3208         Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
3209         IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
3210         "matrix");
3211     return RValue::get(Result);
3212   }
3213 
3214   case Builtin::BI__builtin_matrix_column_major_store: {
3215     MatrixBuilder<CGBuilderTy> MB(Builder);
3216     Value *Matrix = EmitScalarExpr(E->getArg(0));
3217     Address Dst = EmitPointerWithAlignment(E->getArg(1));
3218     Value *Stride = EmitScalarExpr(E->getArg(2));
3219 
3220     const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
3221     auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
3222     assert(PtrTy && "arg1 must be of pointer type");
3223     bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
3224 
3225     EmitNonNullArgCheck(RValue::get(Dst.getPointer()), E->getArg(1)->getType(),
3226                         E->getArg(1)->getExprLoc(), FD, 0);
3227     Value *Result = MB.CreateColumnMajorStore(
3228         Matrix, Dst.getPointer(), Align(Dst.getAlignment().getQuantity()),
3229         Stride, IsVolatile, MatrixTy->getNumRows(), MatrixTy->getNumColumns());
3230     return RValue::get(Result);
3231   }
3232 
3233   case Builtin::BIfinite:
3234   case Builtin::BI__finite:
3235   case Builtin::BIfinitef:
3236   case Builtin::BI__finitef:
3237   case Builtin::BIfinitel:
3238   case Builtin::BI__finitel:
3239   case Builtin::BI__builtin_isinf:
3240   case Builtin::BI__builtin_isfinite: {
3241     // isinf(x)    --> fabs(x) == infinity
3242     // isfinite(x) --> fabs(x) != infinity
3243     // x != NaN via the ordered compare in either case.
3244     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3245     Value *V = EmitScalarExpr(E->getArg(0));
3246     llvm::Type *Ty = V->getType();
3247     if (!Builder.getIsFPConstrained() ||
3248         Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
3249         !Ty->isIEEE()) {
3250       Value *Fabs = EmitFAbs(*this, V);
3251       Constant *Infinity = ConstantFP::getInfinity(V->getType());
3252       CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
3253                                     ? CmpInst::FCMP_OEQ
3254                                     : CmpInst::FCMP_ONE;
3255       Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
3256       return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
3257     }
3258 
3259     if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
3260       return RValue::get(Result);
3261 
3262     // Inf values have all exp bits set and a zero significand. Therefore:
3263     // isinf(V) == ((V << 1) == ((exp mask) << 1))
3264     // isfinite(V) == ((V << 1) < ((exp mask) << 1)) using unsigned comparison
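    // E.g. for float: (exp mask) << 1 == 0xFF000000; shifting V left by one
    // discards the sign bit, so exactly +/-inf compare equal to it.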
3265     unsigned bitsize = Ty->getScalarSizeInBits();
3266     llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
3267     Value *IntV = Builder.CreateBitCast(V, IntTy);
3268     Value *Shl1 = Builder.CreateShl(IntV, 1);
3269     const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
3270     APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
3271     Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1));
3272     if (BuiltinID == Builtin::BI__builtin_isinf)
3273       V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1);
3274     else
3275       V = Builder.CreateICmpULT(Shl1, ExpMaskShl1);
3276     return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3277   }
3278 
3279   case Builtin::BI__builtin_isinf_sign: {
3280     // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
3281     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3282     // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3283     Value *Arg = EmitScalarExpr(E->getArg(0));
3284     Value *AbsArg = EmitFAbs(*this, Arg);
3285     Value *IsInf = Builder.CreateFCmpOEQ(
3286         AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
3287     Value *IsNeg = EmitSignBit(*this, Arg);
3288 
3289     llvm::Type *IntTy = ConvertType(E->getType());
3290     Value *Zero = Constant::getNullValue(IntTy);
3291     Value *One = ConstantInt::get(IntTy, 1);
3292     Value *NegativeOne = ConstantInt::get(IntTy, -1);
3293     Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
3294     Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
3295     return RValue::get(Result);
3296   }
3297 
3298   case Builtin::BI__builtin_isnormal: {
3299     // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
3300     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3301     // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3302     Value *V = EmitScalarExpr(E->getArg(0));
3303     Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
3304 
3305     Value *Abs = EmitFAbs(*this, V);
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
3308     APFloat Smallest = APFloat::getSmallestNormalized(
3309                    getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
3310     Value *IsNormal =
3311       Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
3312                             "isnormal");
3313     V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
3314     V = Builder.CreateAnd(V, IsNormal, "and");
3315     return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
3316   }
3317 
3318   case Builtin::BI__builtin_flt_rounds: {
3319     Function *F = CGM.getIntrinsic(Intrinsic::flt_rounds);
3320 
3321     llvm::Type *ResultType = ConvertType(E->getType());
3322     Value *Result = Builder.CreateCall(F);
3323     if (Result->getType() != ResultType)
3324       Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3325                                      "cast");
3326     return RValue::get(Result);
3327   }
3328 
3329   case Builtin::BI__builtin_fpclassify: {
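    // Args 0-4 are the values to return for NaN, infinity, normal, subnormal
    // and zero respectively; arg 5 is the operand being classified. Lower
    // this as a chain of compare-and-branch blocks feeding one PHI.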
3330     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3331     // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
3332     Value *V = EmitScalarExpr(E->getArg(5));
3333     llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
3334 
3335     // Create Result
3336     BasicBlock *Begin = Builder.GetInsertBlock();
3337     BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
3338     Builder.SetInsertPoint(End);
3339     PHINode *Result =
3340       Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
3341                         "fpclassify_result");
3342 
3343     // if (V==0) return FP_ZERO
3344     Builder.SetInsertPoint(Begin);
3345     Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
3346                                           "iszero");
3347     Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
3348     BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
3349     Builder.CreateCondBr(IsZero, End, NotZero);
3350     Result->addIncoming(ZeroLiteral, Begin);
3351 
3352     // if (V != V) return FP_NAN
3353     Builder.SetInsertPoint(NotZero);
3354     Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
3355     Value *NanLiteral = EmitScalarExpr(E->getArg(0));
3356     BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
3357     Builder.CreateCondBr(IsNan, End, NotNan);
3358     Result->addIncoming(NanLiteral, NotZero);
3359 
3360     // if (fabs(V) == infinity) return FP_INFINITY
3361     Builder.SetInsertPoint(NotNan);
3362     Value *VAbs = EmitFAbs(*this, V);
3363     Value *IsInf =
3364       Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
3365                             "isinf");
3366     Value *InfLiteral = EmitScalarExpr(E->getArg(1));
3367     BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
3368     Builder.CreateCondBr(IsInf, End, NotInf);
3369     Result->addIncoming(InfLiteral, NotNan);
3370 
3371     // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
3372     Builder.SetInsertPoint(NotInf);
3373     APFloat Smallest = APFloat::getSmallestNormalized(
3374         getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
3375     Value *IsNormal =
3376       Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
3377                             "isnormal");
3378     Value *NormalResult =
3379       Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
3380                            EmitScalarExpr(E->getArg(3)));
3381     Builder.CreateBr(End);
3382     Result->addIncoming(NormalResult, NotInf);
3383 
3384     // return Result
3385     Builder.SetInsertPoint(End);
3386     return RValue::get(Result);
3387   }
3388 
3389   case Builtin::BIalloca:
3390   case Builtin::BI_alloca:
3391   case Builtin::BI__builtin_alloca: {
3392     Value *Size = EmitScalarExpr(E->getArg(0));
3393     const TargetInfo &TI = getContext().getTargetInfo();
3394     // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
3395     const Align SuitableAlignmentInBytes =
3396         CGM.getContext()
3397             .toCharUnitsFromBits(TI.getSuitableAlign())
3398             .getAsAlign();
3399     AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3400     AI->setAlignment(SuitableAlignmentInBytes);
3401     initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
3402     return RValue::get(AI);
3403   }
3404 
3405   case Builtin::BI__builtin_alloca_with_align: {
3406     Value *Size = EmitScalarExpr(E->getArg(0));
3407     Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
3408     auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
3409     unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
3410     const Align AlignmentInBytes =
3411         CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
3412     AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
3413     AI->setAlignment(AlignmentInBytes);
3414     initializeAlloca(*this, AI, Size, AlignmentInBytes);
3415     return RValue::get(AI);
3416   }
3417 
3418   case Builtin::BIbzero:
3419   case Builtin::BI__builtin_bzero: {
3420     Address Dest = EmitPointerWithAlignment(E->getArg(0));
3421     Value *SizeVal = EmitScalarExpr(E->getArg(1));
3422     EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3423                         E->getArg(0)->getExprLoc(), FD, 0);
3424     Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
3425     return RValue::get(nullptr);
3426   }
3427   case Builtin::BImemcpy:
3428   case Builtin::BI__builtin_memcpy:
3429   case Builtin::BImempcpy:
3430   case Builtin::BI__builtin_mempcpy: {
3431     Address Dest = EmitPointerWithAlignment(E->getArg(0));
3432     Address Src = EmitPointerWithAlignment(E->getArg(1));
3433     Value *SizeVal = EmitScalarExpr(E->getArg(2));
3434     EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3435                         E->getArg(0)->getExprLoc(), FD, 0);
3436     EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3437                         E->getArg(1)->getExprLoc(), FD, 1);
3438     Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3439     if (BuiltinID == Builtin::BImempcpy ||
3440         BuiltinID == Builtin::BI__builtin_mempcpy)
3441       return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
3442                                                    Dest.getPointer(), SizeVal));
3443     else
3444       return RValue::get(Dest.getPointer());
3445   }
3446 
3447   case Builtin::BI__builtin_memcpy_inline: {
3448     Address Dest = EmitPointerWithAlignment(E->getArg(0));
3449     Address Src = EmitPointerWithAlignment(E->getArg(1));
3450     uint64_t Size =
3451         E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
3452     EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3453                         E->getArg(0)->getExprLoc(), FD, 0);
3454     EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3455                         E->getArg(1)->getExprLoc(), FD, 1);
3456     Builder.CreateMemCpyInline(Dest, Src, Size);
3457     return RValue::get(nullptr);
3458   }
3459 
3460   case Builtin::BI__builtin_char_memchr:
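    // char_memchr differs from memchr only in its return type; reuse the
    // memchr lowering.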
3461     BuiltinID = Builtin::BI__builtin_memchr;
3462     break;
3463 
3464   case Builtin::BI__builtin___memcpy_chk: {
3465     // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
3466     Expr::EvalResult SizeResult, DstSizeResult;
3467     if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3468         !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3469       break;
3470     llvm::APSInt Size = SizeResult.Val.getInt();
3471     llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3472     if (Size.ugt(DstSize))
3473       break;
3474     Address Dest = EmitPointerWithAlignment(E->getArg(0));
3475     Address Src = EmitPointerWithAlignment(E->getArg(1));
3476     Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3477     Builder.CreateMemCpy(Dest, Src, SizeVal, false);
3478     return RValue::get(Dest.getPointer());
3479   }
3480 
3481   case Builtin::BI__builtin_objc_memmove_collectable: {
3482     Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
3483     Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
3484     Value *SizeVal = EmitScalarExpr(E->getArg(2));
3485     CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
3486                                                   DestAddr, SrcAddr, SizeVal);
3487     return RValue::get(DestAddr.getPointer());
3488   }
3489 
3490   case Builtin::BI__builtin___memmove_chk: {
3491     // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
3492     Expr::EvalResult SizeResult, DstSizeResult;
3493     if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3494         !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3495       break;
3496     llvm::APSInt Size = SizeResult.Val.getInt();
3497     llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3498     if (Size.ugt(DstSize))
3499       break;
3500     Address Dest = EmitPointerWithAlignment(E->getArg(0));
3501     Address Src = EmitPointerWithAlignment(E->getArg(1));
3502     Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3503     Builder.CreateMemMove(Dest, Src, SizeVal, false);
3504     return RValue::get(Dest.getPointer());
3505   }
3506 
3507   case Builtin::BImemmove:
3508   case Builtin::BI__builtin_memmove: {
3509     Address Dest = EmitPointerWithAlignment(E->getArg(0));
3510     Address Src = EmitPointerWithAlignment(E->getArg(1));
3511     Value *SizeVal = EmitScalarExpr(E->getArg(2));
3512     EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3513                         E->getArg(0)->getExprLoc(), FD, 0);
3514     EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(1)->getType(),
3515                         E->getArg(1)->getExprLoc(), FD, 1);
3516     Builder.CreateMemMove(Dest, Src, SizeVal, false);
3517     return RValue::get(Dest.getPointer());
3518   }
3519   case Builtin::BImemset:
3520   case Builtin::BI__builtin_memset: {
3521     Address Dest = EmitPointerWithAlignment(E->getArg(0));
3522     Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3523                                          Builder.getInt8Ty());
3524     Value *SizeVal = EmitScalarExpr(E->getArg(2));
3525     EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
3526                         E->getArg(0)->getExprLoc(), FD, 0);
3527     Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3528     return RValue::get(Dest.getPointer());
3529   }
3530   case Builtin::BI__builtin___memset_chk: {
3531     // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
3532     Expr::EvalResult SizeResult, DstSizeResult;
3533     if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
3534         !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
3535       break;
3536     llvm::APSInt Size = SizeResult.Val.getInt();
3537     llvm::APSInt DstSize = DstSizeResult.Val.getInt();
3538     if (Size.ugt(DstSize))
3539       break;
3540     Address Dest = EmitPointerWithAlignment(E->getArg(0));
3541     Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
3542                                          Builder.getInt8Ty());
3543     Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
3544     Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
3545     return RValue::get(Dest.getPointer());
3546   }
3547   case Builtin::BI__builtin_wmemchr: {
3548     // The MSVC runtime library does not provide a definition of wmemchr, so we
3549     // need an inline implementation.
3550     if (!getTarget().getTriple().isOSMSVCRT())
3551       break;
3552 
3553     llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3554     Value *Str = EmitScalarExpr(E->getArg(0));
3555     Value *Chr = EmitScalarExpr(E->getArg(1));
3556     Value *Size = EmitScalarExpr(E->getArg(2));
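    // The emitted loop is roughly:
    //   for (; size != 0; --size, ++str)
    //     if (*str == chr) return str;
    //   return nullptr;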
3557 
3558     BasicBlock *Entry = Builder.GetInsertBlock();
3559     BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
3560     BasicBlock *Next = createBasicBlock("wmemchr.next");
3561     BasicBlock *Exit = createBasicBlock("wmemchr.exit");
3562     Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3563     Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
3564 
3565     EmitBlock(CmpEq);
3566     PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
3567     StrPhi->addIncoming(Str, Entry);
3568     PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3569     SizePhi->addIncoming(Size, Entry);
3570     CharUnits WCharAlign =
3571         getContext().getTypeAlignInChars(getContext().WCharTy);
3572     Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
3573     Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
3574     Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
3575     Builder.CreateCondBr(StrEqChr, Exit, Next);
3576 
3577     EmitBlock(Next);
3578     Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
3579     Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
3580     Value *NextSizeEq0 =
3581         Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
3582     Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
3583     StrPhi->addIncoming(NextStr, Next);
3584     SizePhi->addIncoming(NextSize, Next);
3585 
3586     EmitBlock(Exit);
3587     PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
3588     Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
3589     Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
3590     Ret->addIncoming(FoundChr, CmpEq);
3591     return RValue::get(Ret);
3592   }
3593   case Builtin::BI__builtin_wmemcmp: {
3594     // The MSVC runtime library does not provide a definition of wmemcmp, so we
3595     // need an inline implementation.
3596     if (!getTarget().getTriple().isOSMSVCRT())
3597       break;
3598 
3599     llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
3600 
3601     Value *Dst = EmitScalarExpr(E->getArg(0));
3602     Value *Src = EmitScalarExpr(E->getArg(1));
3603     Value *Size = EmitScalarExpr(E->getArg(2));
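    // The emitted loop is roughly (note the unsigned element compares):
    //   for (; size != 0; --size, ++dst, ++src) {
    //     if (*dst > *src) return 1;
    //     if (*dst < *src) return -1;
    //   }
    //   return 0;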
3604 
3605     BasicBlock *Entry = Builder.GetInsertBlock();
3606     BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
3607     BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
3608     BasicBlock *Next = createBasicBlock("wmemcmp.next");
3609     BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
3610     Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
3611     Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
3612 
3613     EmitBlock(CmpGT);
3614     PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
3615     DstPhi->addIncoming(Dst, Entry);
3616     PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
3617     SrcPhi->addIncoming(Src, Entry);
3618     PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
3619     SizePhi->addIncoming(Size, Entry);
3620     CharUnits WCharAlign =
3621         getContext().getTypeAlignInChars(getContext().WCharTy);
3622     Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
3623     Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
3624     Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
3625     Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
3626 
3627     EmitBlock(CmpLT);
3628     Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
3629     Builder.CreateCondBr(DstLtSrc, Exit, Next);
3630 
3631     EmitBlock(Next);
3632     Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
3633     Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
3634     Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
    Value *NextSizeEq0 =
        Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
    DstPhi->addIncoming(NextDst, Next);
    SrcPhi->addIncoming(NextSrc, Next);
    SizePhi->addIncoming(NextSize, Next);

    EmitBlock(Exit);
    PHINode *Ret = Builder.CreatePHI(IntTy, 4);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
    Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
    Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
    return RValue::get(Ret);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend?  Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
                                                   getContext().UnsignedIntTy);
    Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI_ReturnAddress: {
    Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
                                                   getContext().UnsignedIntTy);
    Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Function *F =
        CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
                                                    : Intrinsic::eh_return_i64);
    Builder.CreateCall(F, {Int, Ptr});
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_unwind_init: {
    Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms.  Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
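    //
    // For example, on a 32-bit target whose hooks request sign extension this
    // emits 'sext (ptrtoint %p to i32) to i64'; most targets simply
    // zero-extend.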

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the target hooks what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
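    // The layout written below matches LLVM's EH sjlj lowering: buf[0] holds
    // the frame pointer and buf[2] the stack pointer; the backend is expected
    // to fill buf[1] with the resume address when lowering
    // llvm.eh.sjlj.setjmp.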
    Address Buf = EmitPointerWithAlignment(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr = Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
        ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
        Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
  }
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_launder: {
    const Expr *Arg = E->getArg(0);
    QualType ArgTy = Arg->getType()->getPointeeType();
    Value *Ptr = EmitScalarExpr(Arg);
    if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
      Ptr = Builder.CreateLaunderInvariantGroup(Ptr);

    return RValue::get(Ptr);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);

  // Clang extensions: not overloaded yet.
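  // For example, __sync_fetch_and_min(&x, y) atomically replaces x with the
  // signed minimum of x and y and returns the old value; the u-variants use
  // unsigned comparisons instead.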
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
                                llvm::Instruction::And, true);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
    return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
    return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateAlignedStore(llvm::Constant::getNullValue(ITy), Ptr,
                                 StoreSize);
    Store->setAtomic(llvm::AtomicOrdering::Release);
    return RValue::get(nullptr);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++11-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
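    //
    // In practice this lowers to a single 'fence seq_cst' instruction.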
    Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_nontemporal_load:
    return RValue::get(EmitNontemporalLoad(*this, E));
  case Builtin::BI__builtin_nontemporal_store:
    return RValue::get(EmitNontemporalStore(*this, E));
  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
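    // For instance, __c11_atomic_is_lock_free(8) becomes, roughly,
    //   call zeroext i1 @__atomic_is_lock_free(i64 8, i8* null)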
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, CGCallee::forDirect(Func),
                    ReturnValueSlot(), Args);
  }

  case Builtin::BI__atomic_test_and_set: {
    // The builtin's parameter type is always volatile-qualified, so look at
    // the argument type to determine whether this particular call is volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = nullptr;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::Monotonic);
        break;
      case 1: // memory_order_consume
      case 2: // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::Acquire);
        break;
      case 3: // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::Release);
        break;
      case 4: // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
                                         llvm::AtomicOrdering::AcquireRelease);
        break;
      case 5: // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(
            llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
            llvm::AtomicOrdering::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("acquire", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("acqrel", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
        llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
        llvm::AtomicOrdering::SequentiallyConsistent};

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }

  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Address Ptr = EmitPointerWithAlignment(E->getArg(0));
    unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::AtomicOrdering::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::AtomicOrdering::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
        llvm::AtomicOrdering::SequentiallyConsistent};

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }

  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SyncScope::ID SSID;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      SSID = llvm::SyncScope::SingleThread;
    else
      SSID = llvm::SyncScope::System;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    return RValue::get(
        Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
                           ConvertType(E->getType())));
  }
  case Builtin::BI__warn_memset_zero_len:
    return RValue::getIgnored();
  case Builtin::BI__annotation: {
    // Re-encode each wide string to UTF8 and make an MDString.
    SmallVector<Metadata *, 1> Strings;
    for (const Expr *Arg : E->arguments()) {
      const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
      assert(Str->getCharByteWidth() == 2);
      StringRef WideBytes = Str->getBytes();
      std::string StrUtf8;
      if (!convertUTF16ToUTF8String(
              makeArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
        CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
        continue;
      }
      Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
    }

    // Build an MDTuple of MDStrings and emit the intrinsic call.
    llvm::Function *F =
        CGM.getIntrinsic(llvm::Intrinsic::codeview_annotation, {});
    MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
    Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
    return RValue::getIgnored();
  }
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, looking through casts. Sema requires this to
    // be a non-wide string literal, potentially cast, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(
        EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
  }
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {

    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout
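    //
    // A typical use is chaining the carry through a multiword addition, e.g.:
    //   unsigned c0, c1;
    //   r0 = __builtin_addc(a0, b0, 0,  &c0);
    //   r1 = __builtin_addc(a1, b1, c0, &c1);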

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    Builder.CreateStore(CarryOut, CarryOutPtr);
    return RValue::get(Sum2);
  }

  case Builtin::BI__builtin_add_overflow:
  case Builtin::BI__builtin_sub_overflow:
  case Builtin::BI__builtin_mul_overflow: {
    const clang::Expr *LeftArg = E->getArg(0);
    const clang::Expr *RightArg = E->getArg(1);
    const clang::Expr *ResultArg = E->getArg(2);

    clang::QualType ResultQTy =
        ResultArg->getType()->castAs<PointerType>()->getPointeeType();

    WidthAndSignedness LeftInfo =
        getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
    WidthAndSignedness RightInfo =
        getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
    WidthAndSignedness ResultInfo =
        getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);

    // Handle mixed-sign multiplication as a special case, because adding
    // runtime or backend support for our generic irgen would be too expensive.
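    // (For example, __builtin_mul_overflow with one signed and one unsigned
    // operand is not covered by any single *.with.overflow intrinsic.)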
    if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
      return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
                                          RightInfo, ResultArg, ResultQTy,
                                          ResultInfo);

    if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
                                              ResultInfo))
      return EmitCheckedUnsignedMultiplySignedResult(
          *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
          ResultInfo);

    WidthAndSignedness EncompassingInfo =
        EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});

    llvm::Type *EncompassingLLVMTy =
        llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);

    llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);

    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default:
      llvm_unreachable("Unknown overflow builtin id.");
    case Builtin::BI__builtin_add_overflow:
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::sadd_with_overflow
                        : llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_sub_overflow:
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::ssub_with_overflow
                        : llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_mul_overflow:
      IntrinsicId = EncompassingInfo.Signed
                        ? llvm::Intrinsic::smul_with_overflow
                        : llvm::Intrinsic::umul_with_overflow;
      break;
    }

    llvm::Value *Left = EmitScalarExpr(LeftArg);
    llvm::Value *Right = EmitScalarExpr(RightArg);
    Address ResultPtr = EmitPointerWithAlignment(ResultArg);

    // Extend each operand to the encompassing type.
    Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
    Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);

    // Perform the operation on the extended values.
    llvm::Value *Overflow, *Result;
    Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);

    if (EncompassingInfo.Width > ResultInfo.Width) {
      // The encompassing type is wider than the result type, so we need to
      // truncate it.
      llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);

      // To see if the truncation caused an overflow, we will extend
      // the result and then compare it to the original result.
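      // For example, with i32 operands and a signed i8 result, a mathematical
      // result of 200 truncates to -56; extending -56 back to i32 compares
      // unequal to 200, flagging the overflow.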
      llvm::Value *ResultTruncExt = Builder.CreateIntCast(
          ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
      llvm::Value *TruncationOverflow =
          Builder.CreateICmpNE(Result, ResultTruncExt);

      Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
      Result = ResultTrunc;
    }

    // Finally, store the result using the pointer.
    bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
    Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);

    return RValue::get(Overflow);
  }

  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow: {

    // We translate all of these builtins directly to the relevant LLVM IR
    // overflow intrinsic.

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));

    // Decide which of the overflow intrinsics we are lowering to:
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown overflow builtin id.");
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      IntrinsicId = llvm::Intrinsic::umul_with_overflow;
      break;
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
      IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
      break;
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
      IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
      break;
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
      IntrinsicId = llvm::Intrinsic::smul_with_overflow;
      break;
    }

    llvm::Value *Carry;
    llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
    Builder.CreateStore(Sum, SumOutPtr);

    return RValue::get(Carry);
  }
  case Builtin::BI__builtin_addressof:
    return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
  case Builtin::BI__builtin_operator_new:
    return EmitBuiltinNewDeleteCall(
        E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
  case Builtin::BI__builtin_operator_delete:
    return EmitBuiltinNewDeleteCall(
        E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);

  case Builtin::BI__builtin_is_aligned:
    return EmitBuiltinIsAligned(E);
  case Builtin::BI__builtin_align_up:
    return EmitBuiltinAlignTo(E, true);
  case Builtin::BI__builtin_align_down:
    return EmitBuiltinAlignTo(E, false);

  case Builtin::BI__noop:
    // __noop always evaluates to an integer literal zero.
    return RValue::get(ConstantInt::get(IntTy, 0));
  case Builtin::BI__builtin_call_with_static_chain: {
    const CallExpr *Call = cast<CallExpr>(E->getArg(0));
    const Expr *Chain = E->getArg(1);
    return EmitCall(Call->getCallee()->getType(),
                    EmitCallee(Call->getCallee()), Call, ReturnValue,
                    EmitScalarExpr(Chain));
  }
  case Builtin::BI_InterlockedExchange8:
  case Builtin::BI_InterlockedExchange16:
  case Builtin::BI_InterlockedExchange:
  case Builtin::BI_InterlockedExchangePointer:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
  case Builtin::BI_InterlockedCompareExchangePointer:
  case Builtin::BI_InterlockedCompareExchangePointer_nf: {
    llvm::Type *RTy;
    llvm::IntegerType *IntType =
      IntegerType::get(getLLVMContext(),
                       getContext().getTypeSize(E->getType()));
    llvm::Type *IntPtrType = IntType->getPointerTo();

    llvm::Value *Destination =
      Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);

    llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
    RTy = Exchange->getType();
    Exchange = Builder.CreatePtrToInt(Exchange, IntType);

    llvm::Value *Comparand =
      Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);

    auto Ordering =
      BuiltinID == Builtin::BI_InterlockedCompareExchangePointer_nf ?
      AtomicOrdering::Monotonic : AtomicOrdering::SequentiallyConsistent;

    auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              Ordering, Ordering);
    Result->setVolatile(true);

    return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
                                                                         0),
                                              RTy));
  }
  case Builtin::BI_InterlockedCompareExchange8:
  case Builtin::BI_InterlockedCompareExchange16:
  case Builtin::BI_InterlockedCompareExchange:
  case Builtin::BI_InterlockedCompareExchange64:
    return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
  case Builtin::BI_InterlockedIncrement16:
  case Builtin::BI_InterlockedIncrement:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
  case Builtin::BI_InterlockedDecrement16:
  case Builtin::BI_InterlockedDecrement:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
  case Builtin::BI_InterlockedAnd8:
  case Builtin::BI_InterlockedAnd16:
  case Builtin::BI_InterlockedAnd:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
  case Builtin::BI_InterlockedExchangeAdd8:
  case Builtin::BI_InterlockedExchangeAdd16:
  case Builtin::BI_InterlockedExchangeAdd:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
  case Builtin::BI_InterlockedExchangeSub8:
  case Builtin::BI_InterlockedExchangeSub16:
  case Builtin::BI_InterlockedExchangeSub:
    return RValue::get(
        EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
  case Builtin::BI_InterlockedOr8:
  case Builtin::BI_InterlockedOr16:
  case Builtin::BI_InterlockedOr:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
  case Builtin::BI_InterlockedXor8:
  case Builtin::BI_InterlockedXor16:
  case Builtin::BI_InterlockedXor:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));

  case Builtin::BI_bittest64:
  case Builtin::BI_bittest:
  case Builtin::BI_bittestandcomplement64:
  case Builtin::BI_bittestandcomplement:
  case Builtin::BI_bittestandreset64:
  case Builtin::BI_bittestandreset:
  case Builtin::BI_bittestandset64:
  case Builtin::BI_bittestandset:
  case Builtin::BI_interlockedbittestandreset:
  case Builtin::BI_interlockedbittestandreset64:
  case Builtin::BI_interlockedbittestandset64:
  case Builtin::BI_interlockedbittestandset:
  case Builtin::BI_interlockedbittestandset_acq:
  case Builtin::BI_interlockedbittestandset_rel:
  case Builtin::BI_interlockedbittestandset_nf:
  case Builtin::BI_interlockedbittestandreset_acq:
  case Builtin::BI_interlockedbittestandreset_rel:
  case Builtin::BI_interlockedbittestandreset_nf:
    return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));

    // These builtins exist to emit regular volatile loads and stores not
    // affected by the -fms-volatile setting.
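    // For example, __iso_volatile_load32(p) always becomes a plain
    // 'load volatile i32', even when /volatile:ms (-fms-volatile) would
    // otherwise apply.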
  case Builtin::BI__iso_volatile_load8:
  case Builtin::BI__iso_volatile_load16:
  case Builtin::BI__iso_volatile_load32:
  case Builtin::BI__iso_volatile_load64:
    return RValue::get(EmitISOVolatileLoad(*this, E));
  case Builtin::BI__iso_volatile_store8:
  case Builtin::BI__iso_volatile_store16:
  case Builtin::BI__iso_volatile_store32:
  case Builtin::BI__iso_volatile_store64:
    return RValue::get(EmitISOVolatileStore(*this, E));

  case Builtin::BI__exception_code:
  case Builtin::BI_exception_code:
    return RValue::get(EmitSEHExceptionCode());
  case Builtin::BI__exception_info:
  case Builtin::BI_exception_info:
    return RValue::get(EmitSEHExceptionInfo());
  case Builtin::BI__abnormal_termination:
  case Builtin::BI_abnormal_termination:
    return RValue::get(EmitSEHAbnormalTermination());
  case Builtin::BI_setjmpex:
    if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
        E->getArg(0)->getType()->isPointerType())
      return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
    break;
  case Builtin::BI_setjmp:
    if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
        E->getArg(0)->getType()->isPointerType()) {
      if (getTarget().getTriple().getArch() == llvm::Triple::x86)
        return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
      else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
        return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
      return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
    }
    break;

  case Builtin::BI__GetExceptionInfo: {
    if (llvm::GlobalVariable *GV =
            CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
      return RValue::get(llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy));
    break;
  }

  case Builtin::BI__fastfail:
    return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));

  case Builtin::BI__builtin_coro_size: {
    auto &Context = getContext();
    auto SizeTy = Context.getSizeType();
    auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    Function *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
    return RValue::get(Builder.CreateCall(F));
  }

  case Builtin::BI__builtin_coro_id:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
  case Builtin::BI__builtin_coro_promise:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
  case Builtin::BI__builtin_coro_resume:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
  case Builtin::BI__builtin_coro_frame:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
  case Builtin::BI__builtin_coro_noop:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
  case Builtin::BI__builtin_coro_free:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
  case Builtin::BI__builtin_coro_destroy:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
  case Builtin::BI__builtin_coro_done:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
  case Builtin::BI__builtin_coro_alloc:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
  case Builtin::BI__builtin_coro_begin:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
  case Builtin::BI__builtin_coro_end:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
  case Builtin::BI__builtin_coro_suspend:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
  case Builtin::BI__builtin_coro_param:
    return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);

  // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
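  // For example, read_pipe(p, ptr) lowers to a runtime call shaped like
  //   i32 __read_pipe_2(%pipe, i8 addrspace(4)* %packet, i32 %size, i32 %align)
  // with the packet size and alignment taken from the pipe's element type.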
  case Builtin::BIread_pipe:
  case Builtin::BIwrite_pipe: {
    Value *Arg0 = EmitScalarExpr(E->getArg(0)),
          *Arg1 = EmitScalarExpr(E->getArg(1));
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));

    // Type of the generic packet parameter.
    unsigned GenericAS =
        getContext().getTargetAddressSpace(LangAS::opencl_generic);
    llvm::Type *I8PTy = llvm::PointerType::get(
        llvm::Type::getInt8Ty(getLLVMContext()), GenericAS);

    // Determine which overloaded version we should generate the call for.
    if (2U == E->getNumArgs()) {
      const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
                                                             : "__write_pipe_2";
      // Create a generic function type so the call can be made with any
      // builtin or user-defined type.
      llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
      return RValue::get(
          EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                          {Arg0, BCast, PacketSize, PacketAlign}));
    } else {
      assert(4 == E->getNumArgs() &&
             "Illegal number of parameters to pipe function");
      const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
                                                             : "__write_pipe_4";

      llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
                              Int32Ty, Int32Ty};
      Value *Arg2 = EmitScalarExpr(E->getArg(2)),
            *Arg3 = EmitScalarExpr(E->getArg(3));
      llvm::FunctionType *FTy = llvm::FunctionType::get(
          Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
      Value *BCast = Builder.CreatePointerCast(Arg3, I8PTy);
      // We know the third argument is an integer type, but we may need to cast
      // it to i32.
      if (Arg2->getType() != Int32Ty)
        Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
      return RValue::get(
          EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                          {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
    }
  }
  // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
  // functions
  case Builtin::BIreserve_read_pipe:
  case Builtin::BIreserve_write_pipe:
  case Builtin::BIwork_group_reserve_read_pipe:
  case Builtin::BIwork_group_reserve_write_pipe:
  case Builtin::BIsub_group_reserve_read_pipe:
  case Builtin::BIsub_group_reserve_write_pipe: {
    // Composing the mangled name for the function.
    const char *Name;
    if (BuiltinID == Builtin::BIreserve_read_pipe)
      Name = "__reserve_read_pipe";
    else if (BuiltinID == Builtin::BIreserve_write_pipe)
      Name = "__reserve_write_pipe";
    else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
      Name = "__work_group_reserve_read_pipe";
    else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
      Name = "__work_group_reserve_write_pipe";
    else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
      Name = "__sub_group_reserve_read_pipe";
    else
      Name = "__sub_group_reserve_write_pipe";

    Value *Arg0 = EmitScalarExpr(E->getArg(0)),
          *Arg1 = EmitScalarExpr(E->getArg(1));
    llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));

    // Building the generic function prototype.
    llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
    // We know the second argument is an integer type, but we may need to cast
    // it to i32.
    if (Arg1->getType() != Int32Ty)
      Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
    return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                                       {Arg0, Arg1, PacketSize, PacketAlign}));
  }
  // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
  // functions
  case Builtin::BIcommit_read_pipe:
  case Builtin::BIcommit_write_pipe:
  case Builtin::BIwork_group_commit_read_pipe:
  case Builtin::BIwork_group_commit_write_pipe:
  case Builtin::BIsub_group_commit_read_pipe:
  case Builtin::BIsub_group_commit_write_pipe: {
    const char *Name;
    if (BuiltinID == Builtin::BIcommit_read_pipe)
      Name = "__commit_read_pipe";
    else if (BuiltinID == Builtin::BIcommit_write_pipe)
      Name = "__commit_write_pipe";
    else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
      Name = "__work_group_commit_read_pipe";
    else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
      Name = "__work_group_commit_write_pipe";
    else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
      Name = "__sub_group_commit_read_pipe";
    else
      Name = "__sub_group_commit_write_pipe";

    Value *Arg0 = EmitScalarExpr(E->getArg(0)),
          *Arg1 = EmitScalarExpr(E->getArg(1));
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));

    // Building the generic function prototype.
    llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
    llvm::FunctionType *FTy =
        llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
                                llvm::ArrayRef<llvm::Type *>(ArgTys), false);

    return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                                       {Arg0, Arg1, PacketSize, PacketAlign}));
  }
  // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
  case Builtin::BIget_pipe_num_packets:
  case Builtin::BIget_pipe_max_packets: {
    const char *BaseName;
    const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
    if (BuiltinID == Builtin::BIget_pipe_num_packets)
      BaseName = "__get_pipe_num_packets";
    else
      BaseName = "__get_pipe_max_packets";
    std::string Name = std::string(BaseName) +
                       std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");

    // Building the generic function prototype.
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    CGOpenCLRuntime OpenCLRT(CGM);
    Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
    Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
    llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);

    return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
                                       {Arg0, PacketSize, PacketAlign}));
  }

  // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
  case Builtin::BIto_global:
  case Builtin::BIto_local:
  case Builtin::BIto_private: {
    auto Arg0 = EmitScalarExpr(E->getArg(0));
    auto NewArgT = llvm::PointerType::get(Int8Ty,
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
    auto NewRetT = llvm::PointerType::get(Int8Ty,
      CGM.getContext().getTargetAddressSpace(
        E->getType()->getPointeeType().getAddressSpace()));
    auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
    llvm::Value *NewArg;
    if (Arg0->getType()->getPointerAddressSpace() !=
        NewArgT->getPointerAddressSpace())
      NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
    else
      NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
    auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
    auto NewCall =
        EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
    return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
      ConvertType(E->getType())));
  }

  // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
  // It contains four different overload formats specified in Table 6.13.17.1.
  case Builtin::BIenqueue_kernel: {
    StringRef Name; // Generated function call name
    unsigned NumArgs = E->getNumArgs();

    llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
    llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
        getContext().getTargetAddressSpace(LangAS::opencl_generic));

    llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
    llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
    LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
    llvm::Value *Range = NDRangeL.getAddress(*this).getPointer();
    llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();

    if (NumArgs == 4) {
      // The most basic form of the call with parameters:
      // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
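      // The runtime entry point invoked here has, roughly, the signature
      //   i32 __enqueue_kernel_basic(queue_t, i32, ndrange_t* byval,
      //                              i8 addrspace(4)*, i8 addrspace(4)*)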
4875       Name = "__enqueue_kernel_basic";
4876       llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy,
4877                               GenericVoidPtrTy};
4878       llvm::FunctionType *FTy = llvm::FunctionType::get(
4879           Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
4880 
4881       auto Info =
4882           CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4883       llvm::Value *Kernel =
4884           Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4885       llvm::Value *Block =
4886           Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4887 
4888       AttrBuilder B;
4889       B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
4890       llvm::AttributeList ByValAttrSet =
4891           llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
4892 
4893       auto RTCall =
4894           EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
4895                           {Queue, Flags, Range, Kernel, Block});
4896       RTCall->setAttributes(ByValAttrSet);
4897       return RValue::get(RTCall);
4898     }
4899     assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
4900 
4901     // Create a temporary array to hold the sizes of local pointer arguments
4902     // for the block. \p First is the position of the first size argument.
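    // E.g. for enqueue_kernel(q, f, r, ^(local int *a, local float *b){...},
    // 16u, 32u) this creates a two-element size_t array holding {16, 32} and
    // returns a pointer to its first element, plus the size value and base
    // pointer needed for the matching lifetime.end.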
4903     auto CreateArrayForSizeVar = [=](unsigned First)
4904         -> std::tuple<llvm::Value *, llvm::Value *, llvm::Value *> {
4905       llvm::APInt ArraySize(32, NumArgs - First);
4906       QualType SizeArrayTy = getContext().getConstantArrayType(
4907           getContext().getSizeType(), ArraySize, nullptr, ArrayType::Normal,
4908           /*IndexTypeQuals=*/0);
4909       auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
4910       llvm::Value *TmpPtr = Tmp.getPointer();
4911       llvm::Value *TmpSize = EmitLifetimeStart(
4912           CGM.getDataLayout().getTypeAllocSize(Tmp.getElementType()), TmpPtr);
4913       llvm::Value *ElemPtr;
4914       // Each of the following arguments specifies the size of the corresponding
4915       // argument passed to the enqueued block.
4916       auto *Zero = llvm::ConstantInt::get(IntTy, 0);
4917       for (unsigned I = First; I < NumArgs; ++I) {
4918         auto *Index = llvm::ConstantInt::get(IntTy, I - First);
4919         auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr,
4920                                       {Zero, Index});
4921         if (I == First)
4922           ElemPtr = GEP;
4923         auto *V =
4924             Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
4925         Builder.CreateAlignedStore(
4926             V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
4927       }
4928       return std::tie(ElemPtr, TmpSize, TmpPtr);
4929     };
4930 
4931     // Could have events and/or varargs.
4932     if (E->getArg(3)->getType()->isBlockPointerType()) {
4933       // No events passed, but has variadic arguments.
4934       Name = "__enqueue_kernel_varargs";
4935       auto Info =
4936           CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
4937       llvm::Value *Kernel =
4938           Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4939       auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4940       llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
4941       std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(4);
4942 
4943       // Create an array of the arguments, plus a constant value to tell
4944       // the runtime the number of variadic arguments.
4945       llvm::Value *const Args[] = {Queue,  Flags,
4946                                    Range,  Kernel,
4947                                    Block,  ConstantInt::get(IntTy, NumArgs - 4),
4948                                    ElemPtr};
4949       llvm::Type *const ArgTys[] = {
4950           QueueTy,          IntTy, RangeTy,           GenericVoidPtrTy,
4951           GenericVoidPtrTy, IntTy, ElemPtr->getType()};
4952 
4953       llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
4954       auto Call = RValue::get(
4955           EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
4956       if (TmpSize)
4957         EmitLifetimeEnd(TmpSize, TmpPtr);
4958       return Call;
4959     }
4960     // All remaining forms of the call take event arguments.
4961     if (NumArgs >= 7) {
4962       llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
4963       llvm::PointerType *EventPtrTy = EventTy->getPointerTo(
4964           CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
4965 
4966       llvm::Value *NumEvents =
4967           Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
4968 
4969       // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
4970       // arguments to be null pointer constants (including a literal `0`),
4971       // detect that case here and emit a null pointer directly.
4972       llvm::Value *EventWaitList = nullptr;
4973       if (E->getArg(4)->isNullPointerConstant(
4974               getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4975         EventWaitList = llvm::ConstantPointerNull::get(EventPtrTy);
4976       } else {
4977         EventWaitList = E->getArg(4)->getType()->isArrayType()
4978                         ? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
4979                         : EmitScalarExpr(E->getArg(4));
4980         // Convert to generic address space.
4981         EventWaitList = Builder.CreatePointerCast(EventWaitList, EventPtrTy);
4982       }
4983       llvm::Value *EventRet = nullptr;
4984       if (E->getArg(5)->isNullPointerConstant(
4985               getContext(), Expr::NPC_ValueDependentIsNotNull)) {
4986         EventRet = llvm::ConstantPointerNull::get(EventPtrTy);
4987       } else {
4988         EventRet =
4989             Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), EventPtrTy);
4990       }
4991 
4992       auto Info =
4993           CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
4994       llvm::Value *Kernel =
4995           Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
4996       llvm::Value *Block =
4997           Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
4998 
4999       std::vector<llvm::Type *> ArgTys = {
5000           QueueTy,    Int32Ty,    RangeTy,          Int32Ty,
5001           EventPtrTy, EventPtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
5002 
5003       std::vector<llvm::Value *> Args = {Queue,     Flags,         Range,
5004                                          NumEvents, EventWaitList, EventRet,
5005                                          Kernel,    Block};
5006 
5007       if (NumArgs == 7) {
5008         // Has events but no variadics.
5009         Name = "__enqueue_kernel_basic_events";
5010         llvm::FunctionType *FTy = llvm::FunctionType::get(
5011             Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5012         return RValue::get(
5013             EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5014                             llvm::ArrayRef<llvm::Value *>(Args)));
5015       }
5016       // Has event info and variadics.
5017       // Pass the number of variadic arguments to the runtime function too.
5018       Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
5019       ArgTys.push_back(Int32Ty);
5020       Name = "__enqueue_kernel_events_varargs";
5021 
5022       llvm::Value *ElemPtr, *TmpSize, *TmpPtr;
5023       std::tie(ElemPtr, TmpSize, TmpPtr) = CreateArrayForSizeVar(7);
5024       Args.push_back(ElemPtr);
5025       ArgTys.push_back(ElemPtr->getType());
5026 
5027       llvm::FunctionType *FTy = llvm::FunctionType::get(
5028           Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
5029       auto Call =
5030           RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5031                                       llvm::ArrayRef<llvm::Value *>(Args)));
5032       if (TmpSize)
5033         EmitLifetimeEnd(TmpSize, TmpPtr);
5034       return Call;
5035     }
5036     LLVM_FALLTHROUGH;
5037   }
5038   // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
5039   // parameter.
5040   case Builtin::BIget_kernel_work_group_size: {
5041     llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
5042         getContext().getTargetAddressSpace(LangAS::opencl_generic));
5043     auto Info =
5044         CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
5045     Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
5046     Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5047     return RValue::get(EmitRuntimeCall(
5048         CGM.CreateRuntimeFunction(
5049             llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
5050                                     false),
5051             "__get_kernel_work_group_size_impl"),
5052         {Kernel, Arg}));
5053   }
5054   case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
5055     llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
5056         getContext().getTargetAddressSpace(LangAS::opencl_generic));
5057     auto Info =
5058         CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
5059     Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
5060     Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5061     return RValue::get(EmitRuntimeCall(
5062         CGM.CreateRuntimeFunction(
5063             llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
5064                                     false),
5065             "__get_kernel_preferred_work_group_size_multiple_impl"),
5066         {Kernel, Arg}));
5067   }
5068   case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
5069   case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
5070     llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
5071         getContext().getTargetAddressSpace(LangAS::opencl_generic));
5072     LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
5073     llvm::Value *NDRange = NDRangeL.getAddress(*this).getPointer();
5074     auto Info =
5075         CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
5076     Value *Kernel = Builder.CreatePointerCast(Info.Kernel, GenericVoidPtrTy);
5077     Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5078     const char *Name =
5079         BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
5080             ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
5081             : "__get_kernel_sub_group_count_for_ndrange_impl";
5082     return RValue::get(EmitRuntimeCall(
5083         CGM.CreateRuntimeFunction(
5084             llvm::FunctionType::get(
5085                 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
5086                 false),
5087             Name),
5088         {NDRange, Kernel, Block}));
5089   }
5090 
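  // These builtins convert between 'half' in memory and float/double values:
  // e.g. __builtin_store_halff(1.0f, p) emits an fptrunc to half plus a
  // store, and __builtin_load_half(p) emits a load plus an fpext to double.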
5091   case Builtin::BI__builtin_store_half:
5092   case Builtin::BI__builtin_store_halff: {
5093     Value *Val = EmitScalarExpr(E->getArg(0));
5094     Address Address = EmitPointerWithAlignment(E->getArg(1));
5095     Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
5096     return RValue::get(Builder.CreateStore(HalfVal, Address));
5097   }
5098   case Builtin::BI__builtin_load_half: {
5099     Address Address = EmitPointerWithAlignment(E->getArg(0));
5100     Value *HalfVal = Builder.CreateLoad(Address);
5101     return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
5102   }
5103   case Builtin::BI__builtin_load_halff: {
5104     Address Address = EmitPointerWithAlignment(E->getArg(0));
5105     Value *HalfVal = Builder.CreateLoad(Address);
5106     return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
5107   }
5108   case Builtin::BIprintf:
5109     if (getTarget().getTriple().isNVPTX() ||
5110         getTarget().getTriple().isAMDGCN()) {
5111       if (getLangOpts().OpenMPIsDevice)
5112         return EmitOpenMPDevicePrintfCallExpr(E);
5113       if (getTarget().getTriple().isNVPTX())
5114         return EmitNVPTXDevicePrintfCallExpr(E);
5115       if (getTarget().getTriple().isAMDGCN() && getLangOpts().HIP)
5116         return EmitAMDGPUDevicePrintfCallExpr(E);
5117     }
5118 
5119     break;
5120   case Builtin::BI__builtin_canonicalize:
5121   case Builtin::BI__builtin_canonicalizef:
5122   case Builtin::BI__builtin_canonicalizef16:
5123   case Builtin::BI__builtin_canonicalizel:
5124     return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::canonicalize));
5125 
5126   case Builtin::BI__builtin_thread_pointer: {
5127     if (!getContext().getTargetInfo().isTLSSupported())
5128       CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
5129     // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
5130     break;
5131   }
5132   case Builtin::BI__builtin_os_log_format:
5133     return emitBuiltinOSLogFormat(*E);
5134 
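  // When instrumentation is enabled, __xray_customevent(buf, len) lowers to
  // the llvm.xray.customevent intrinsic, roughly:
  //   call void @llvm.xray.customevent(i8* %buf, i32 %len)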
5135   case Builtin::BI__xray_customevent: {
5136     if (!ShouldXRayInstrumentFunction())
5137       return RValue::getIgnored();
5138 
5139     if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
5140             XRayInstrKind::Custom))
5141       return RValue::getIgnored();
5142 
5143     if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
5144       if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
5145         return RValue::getIgnored();
5146 
5147     Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
5148     auto FTy = F->getFunctionType();
5149     auto Arg0 = E->getArg(0);
5150     auto Arg0Val = EmitScalarExpr(Arg0);
5151     auto Arg0Ty = Arg0->getType();
5152     auto PTy0 = FTy->getParamType(0);
5153     if (PTy0 != Arg0Val->getType()) {
5154       if (Arg0Ty->isArrayType())
5155         Arg0Val = EmitArrayToPointerDecay(Arg0).getPointer();
5156       else
5157         Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
5158     }
5159     auto Arg1 = EmitScalarExpr(E->getArg(1));
5160     auto PTy1 = FTy->getParamType(1);
5161     if (PTy1 != Arg1->getType())
5162       Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
5163     return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
5164   }
5165 
5166   case Builtin::BI__xray_typedevent: {
5167     // TODO: There should be a way to always emit events even if the current
5168     // function is not instrumented. Losing events in a stream can cripple
5169     // a trace.
5170     if (!ShouldXRayInstrumentFunction())
5171       return RValue::getIgnored();
5172 
5173     if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
5174             XRayInstrKind::Typed))
5175       return RValue::getIgnored();
5176 
5177     if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
5178       if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
5179         return RValue::getIgnored();
5180 
5181     Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
5182     auto FTy = F->getFunctionType();
5183     auto Arg0 = EmitScalarExpr(E->getArg(0));
5184     auto PTy0 = FTy->getParamType(0);
5185     if (PTy0 != Arg0->getType())
5186       Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
5187     auto Arg1 = E->getArg(1);
5188     auto Arg1Val = EmitScalarExpr(Arg1);
5189     auto Arg1Ty = Arg1->getType();
5190     auto PTy1 = FTy->getParamType(1);
5191     if (PTy1 != Arg1Val->getType()) {
5192       if (Arg1Ty->isArrayType())
5193         Arg1Val = EmitArrayToPointerDecay(Arg1).getPointer();
5194       else
5195         Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
5196     }
5197     auto Arg2 = EmitScalarExpr(E->getArg(2));
5198     auto PTy2 = FTy->getParamType(2);
5199     if (PTy2 != Arg2->getType())
5200       Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
5201     return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
5202   }
5203 
5204   case Builtin::BI__builtin_ms_va_start:
5205   case Builtin::BI__builtin_ms_va_end:
5206     return RValue::get(
5207         EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).getPointer(),
5208                        BuiltinID == Builtin::BI__builtin_ms_va_start));
5209 
5210   case Builtin::BI__builtin_ms_va_copy: {
5211     // Lower this manually. We can't reliably determine whether or not any
5212     // given va_copy() is for a Win64 va_list from the calling convention
5213     // alone, because it's legal to do this from a System V ABI function.
5214     // With opaque pointer types, we won't have enough information in LLVM
5215     // IR to determine this from the argument types, either. Best to do it
5216     // now, while we have enough information.
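    // On Win64 a va_list is just a pointer, so the copy below is a single
    // pointer-sized load and store, roughly:
    //   %ap.val = load i8*, i8** %ap
    //   store i8* %ap.val, i8** %cp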
5217     Address DestAddr = EmitMSVAListRef(E->getArg(0));
5218     Address SrcAddr = EmitMSVAListRef(E->getArg(1));
5219 
5220     llvm::Type *BPP = Int8PtrPtrTy;
5221 
5222     DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), BPP, "cp"),
5223                        DestAddr.getAlignment());
5224     SrcAddr = Address(Builder.CreateBitCast(SrcAddr.getPointer(), BPP, "ap"),
5225                       SrcAddr.getAlignment());
5226 
5227     Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
5228     return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
5229   }
5230 
5231   case Builtin::BI__builtin_get_device_side_mangled_name: {
5232     auto Name = CGM.getCUDARuntime().getDeviceSideName(
5233         cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
5234     auto Str = CGM.GetAddrOfConstantCString(Name, "");
5235     llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
5236                                llvm::ConstantInt::get(SizeTy, 0)};
5237     auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(),
5238                                                      Str.getPointer(), Zeros);
5239     return RValue::get(Ptr);
5240   }
5241   }
5242 
5243   // If this is an alias for a lib function (e.g. __builtin_sin), emit
5244   // the call using the normal call path, but using the unmangled
5245   // version of the function name.
5246   if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
5247     return emitLibraryCall(*this, FD, E,
5248                            CGM.getBuiltinLibFunction(FD, BuiltinID));
5249 
5250   // If this is a predefined lib function (e.g. malloc), emit the call
5251   // using exactly the normal call path.
5252   if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
5253     return emitLibraryCall(*this, FD, E,
5254                       cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
5255 
5256   // Check that a call to a target specific builtin has the correct target
5257   // features.
5258   // This is down here so the check is skipped for non-target-specific
5259   // builtins; however, if generic builtins ever start to require generic
5260   // target features, this can move up to the beginning of the function.
5261   checkTargetFeatures(E, FD);
5262 
5263   if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
5264     LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
5265 
5266   // See if we have a target specific intrinsic.
5267   const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
5268   Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
5269   StringRef Prefix =
5270       llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
5271   if (!Prefix.empty()) {
5272     IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
5273     // NOTE: We don't need to perform a compatibility-flag check here, since
5274     // the MS builtins are declared in Builtins*.def via LANGBUILTIN with
5275     // ALL_MS_LANGUAGES and have already been filtered out earlier.
5276     if (IntrinsicID == Intrinsic::not_intrinsic)
5277       IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
5278   }
5279 
5280   if (IntrinsicID != Intrinsic::not_intrinsic) {
5281     SmallVector<Value*, 16> Args;
5282 
5283     // Find out if any arguments are required to be integer constant
5284     // expressions.
5285     unsigned ICEArguments = 0;
5286     ASTContext::GetBuiltinTypeError Error;
5287     getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
5288     assert(Error == ASTContext::GE_None && "Should not codegen an error");
5289 
5290     Function *F = CGM.getIntrinsic(IntrinsicID);
5291     llvm::FunctionType *FTy = F->getFunctionType();
5292 
5293     for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
5294       Value *ArgValue;
5295       // If this is a normal argument, just emit it as a scalar.
5296       if ((ICEArguments & (1 << i)) == 0) {
5297         ArgValue = EmitScalarExpr(E->getArg(i));
5298       } else {
5299         // If this is required to be a constant, constant fold it so that we
5300         // know that the generated intrinsic gets a ConstantInt.
5301         ArgValue = llvm::ConstantInt::get(
5302             getLLVMContext(),
5303             *E->getArg(i)->getIntegerConstantExpr(getContext()));
5304       }
5305 
5306       // If the intrinsic arg type is different from the builtin arg type
5307       // we need to do a bit cast.
5308       llvm::Type *PTy = FTy->getParamType(i);
5309       if (PTy != ArgValue->getType()) {
5310         // XXX - vector of pointers?
5311         if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
5312           if (PtrTy->getAddressSpace() !=
5313               ArgValue->getType()->getPointerAddressSpace()) {
5314             ArgValue = Builder.CreateAddrSpaceCast(
5315               ArgValue,
5316               ArgValue->getType()->getPointerTo(PtrTy->getAddressSpace()));
5317           }
5318         }
5319 
5320         assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
5321                "Must be able to losslessly bit cast to param");
5322         ArgValue = Builder.CreateBitCast(ArgValue, PTy);
5323       }
5324 
5325       Args.push_back(ArgValue);
5326     }
5327 
5328     Value *V = Builder.CreateCall(F, Args);
5329     QualType BuiltinRetType = E->getType();
5330 
5331     llvm::Type *RetTy = VoidTy;
5332     if (!BuiltinRetType->isVoidType())
5333       RetTy = ConvertType(BuiltinRetType);
5334 
5335     if (RetTy != V->getType()) {
5336       // XXX - vector of pointers?
5337       if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
5338         if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
5339           V = Builder.CreateAddrSpaceCast(
5340             V, V->getType()->getPointerTo(PtrTy->getAddressSpace()));
5341         }
5342       }
5343 
5344       assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
5345              "Must be able to losslessly bit cast result type");
5346       V = Builder.CreateBitCast(V, RetTy);
5347     }
5348 
5349     return RValue::get(V);
5350   }
5351 
5352   // Some target-specific builtins can have aggregate return values, e.g.
5353   // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
5354   // ReturnValue to be non-null, so that the target-specific emission code can
5355   // always just emit into it.
5356   TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
5357   if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
5358     Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
5359     ReturnValue = ReturnValueSlot(DestPtr, false);
5360   }
5361 
5362   // Now see if we can emit a target-specific builtin.
5363   if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
5364     switch (EvalKind) {
5365     case TEK_Scalar:
5366       return RValue::get(V);
5367     case TEK_Aggregate:
5368       return RValue::getAggregate(ReturnValue.getValue(),
5369                                   ReturnValue.isVolatile());
5370     case TEK_Complex:
5371       llvm_unreachable("No current target builtin returns complex");
5372     }
5373     llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
5374   }
5375 
5376   ErrorUnsupported(E, "builtin function");
5377 
5378   // Unknown builtin: the error has already been reported, so return undef.
5379   return GetUndefRValue(E->getType());
5380 }
5381 
5382 static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
5383                                         unsigned BuiltinID, const CallExpr *E,
5384                                         ReturnValueSlot ReturnValue,
5385                                         llvm::Triple::ArchType Arch) {
5386   switch (Arch) {
5387   case llvm::Triple::arm:
5388   case llvm::Triple::armeb:
5389   case llvm::Triple::thumb:
5390   case llvm::Triple::thumbeb:
5391     return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
5392   case llvm::Triple::aarch64:
5393   case llvm::Triple::aarch64_32:
5394   case llvm::Triple::aarch64_be:
5395     return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
5396   case llvm::Triple::bpfeb:
5397   case llvm::Triple::bpfel:
5398     return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
5399   case llvm::Triple::x86:
5400   case llvm::Triple::x86_64:
5401     return CGF->EmitX86BuiltinExpr(BuiltinID, E);
5402   case llvm::Triple::ppc:
5403   case llvm::Triple::ppcle:
5404   case llvm::Triple::ppc64:
5405   case llvm::Triple::ppc64le:
5406     return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
5407   case llvm::Triple::r600:
5408   case llvm::Triple::amdgcn:
5409     return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
5410   case llvm::Triple::systemz:
5411     return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
5412   case llvm::Triple::nvptx:
5413   case llvm::Triple::nvptx64:
5414     return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
5415   case llvm::Triple::wasm32:
5416   case llvm::Triple::wasm64:
5417     return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
5418   case llvm::Triple::hexagon:
5419     return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
5420   case llvm::Triple::riscv32:
5421   case llvm::Triple::riscv64:
5422     return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
5423   default:
5424     return nullptr;
5425   }
5426 }
5427 
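// Dispatch a target-specific builtin to the per-architecture emitter. When
// the builtin belongs to the auxiliary target (e.g. during OpenMP or CUDA
// offload compilation), dispatch on the aux triple's architecture instead.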
5428 Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
5429                                               const CallExpr *E,
5430                                               ReturnValueSlot ReturnValue) {
5431   if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
5432     assert(getContext().getAuxTargetInfo() && "Missing aux target info");
5433     return EmitTargetArchBuiltinExpr(
5434         this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
5435         ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
5436   }
5437 
5438   return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
5439                                    getTarget().getTriple().getArch());
5440 }
5441 
5442 static llvm::FixedVectorType *GetNeonType(CodeGenFunction *CGF,
5443                                           NeonTypeFlags TypeFlags,
5444                                           bool HasLegalHalfType = true,
5445                                           bool V1Ty = false,
5446                                           bool AllowBFloatArgsAndRet = true) {
5447   int IsQuad = TypeFlags.isQuad();
5448   switch (TypeFlags.getEltType()) {
5449   case NeonTypeFlags::Int8:
5450   case NeonTypeFlags::Poly8:
5451     return llvm::FixedVectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
5452   case NeonTypeFlags::Int16:
5453   case NeonTypeFlags::Poly16:
5454     return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5455   case NeonTypeFlags::BFloat16:
5456     if (AllowBFloatArgsAndRet)
5457       return llvm::FixedVectorType::get(CGF->BFloatTy, V1Ty ? 1 : (4 << IsQuad));
5458     else
5459       return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5460   case NeonTypeFlags::Float16:
5461     if (HasLegalHalfType)
5462       return llvm::FixedVectorType::get(CGF->HalfTy, V1Ty ? 1 : (4 << IsQuad));
5463     else
5464       return llvm::FixedVectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
5465   case NeonTypeFlags::Int32:
5466     return llvm::FixedVectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
5467   case NeonTypeFlags::Int64:
5468   case NeonTypeFlags::Poly64:
5469     return llvm::FixedVectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
5470   case NeonTypeFlags::Poly128:
5471     // FIXME: i128 and f128 aren't fully supported in Clang and LLVM;
5472     // much of the i128/f128 API is missing, so we represent poly128 as
5473     // v16i8 and rely on pattern matching.
5474     return llvm::FixedVectorType::get(CGF->Int8Ty, 16);
5475   case NeonTypeFlags::Float32:
5476     return llvm::FixedVectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
5477   case NeonTypeFlags::Float64:
5478     return llvm::FixedVectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
5479   }
5480   llvm_unreachable("Unknown vector element type!");
5481 }
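// E.g. NeonTypeFlags(Int32) with the quad bit set yields <4 x i32>, while the
// 64-bit (non-quad) form yields <2 x i32>; V1Ty forces a one-element vector.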
5482 
5483 static llvm::VectorType *GetFloatNeonType(CodeGenFunction *CGF,
5484                                           NeonTypeFlags IntTypeFlags) {
5485   int IsQuad = IntTypeFlags.isQuad();
5486   switch (IntTypeFlags.getEltType()) {
5487   case NeonTypeFlags::Int16:
5488     return llvm::FixedVectorType::get(CGF->HalfTy, (4 << IsQuad));
5489   case NeonTypeFlags::Int32:
5490     return llvm::FixedVectorType::get(CGF->FloatTy, (2 << IsQuad));
5491   case NeonTypeFlags::Int64:
5492     return llvm::FixedVectorType::get(CGF->DoubleTy, (1 << IsQuad));
5493   default:
5494     llvm_unreachable("Type can't be converted to floating-point!");
5495   }
5496 }
5497 
5498 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C,
5499                                       const ElementCount &Count) {
5500   Value *SV = llvm::ConstantVector::getSplat(Count, C);
5501   return Builder.CreateShuffleVector(V, V, SV, "lane");
5502 }
5503 
5504 Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
5505   ElementCount EC = cast<llvm::VectorType>(V->getType())->getElementCount();
5506   return EmitNeonSplat(V, C, EC);
5507 }
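// E.g. with C == i32 1 and a <4 x i32> input, the shuffle mask above is
// <1, 1, 1, 1>, broadcasting lane 1 across all lanes of the result.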
5508 
5509 Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
5510                                      const char *name,
5511                                      unsigned shift, bool rightshift) {
5512   unsigned j = 0;
5513   for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
5514        ai != ae; ++ai, ++j) {
5515     if (F->isConstrainedFPIntrinsic())
5516       if (ai->getType()->isMetadataTy())
5517         continue;
5518     if (shift > 0 && shift == j)
5519       Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
5520     else
5521       Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
5522   }
5523 
5524   if (F->isConstrainedFPIntrinsic())
5525     return Builder.CreateConstrainedFPCall(F, Ops, name);
5526   else
5527     return Builder.CreateCall(F, Ops, name);
5528 }
5529 
5530 Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
5531                                             bool neg) {
5532   int SV = cast<ConstantInt>(V)->getSExtValue();
5533   return ConstantInt::get(Ty, neg ? -SV : SV);
5534 }
5535 
5536 // Right-shift a vector by a constant.
5537 Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
5538                                           llvm::Type *Ty, bool usgn,
5539                                           const char *name) {
5540   llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
5541 
5542   int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
5543   int EltSize = VTy->getScalarSizeInBits();
5544 
5545   Vec = Builder.CreateBitCast(Vec, Ty);
5546 
5547   // lshr/ashr are undefined when the shift amount is equal to the vector
5548   // element size.
5549   if (ShiftAmt == EltSize) {
5550     if (usgn) {
5551       // Right-shifting an unsigned value by its size yields 0.
5552       return llvm::ConstantAggregateZero::get(VTy);
5553     } else {
5554       // Right-shifting a signed value by its size is equivalent
5555       // to a shift of size-1.
5556       --ShiftAmt;
5557       Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
5558     }
5559   }
5560 
5561   Shift = EmitNeonShiftVector(Shift, Ty, false);
5562   if (usgn)
5563     return Builder.CreateLShr(Vec, Shift, name);
5564   else
5565     return Builder.CreateAShr(Vec, Shift, name);
5566 }
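// E.g. a signed right shift of <4 x i32> lanes by 32 is emitted as an ashr
// by 31, while the equivalent unsigned shift folds to a zero vector.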
5567 
5568 enum {
5569   AddRetType = (1 << 0),
5570   Add1ArgType = (1 << 1),
5571   Add2ArgTypes = (1 << 2),
5572 
5573   VectorizeRetType = (1 << 3),
5574   VectorizeArgTypes = (1 << 4),
5575 
5576   InventFloatType = (1 << 5),
5577   UnsignedAlts = (1 << 6),
5578 
5579   Use64BitVectors = (1 << 7),
5580   Use128BitVectors = (1 << 8),
5581 
5582   Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
5583   VectorRet = AddRetType | VectorizeRetType,
5584   VectorRetGetArgs01 =
5585       AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
5586   FpCmpzModifiers =
5587       AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
5588 };
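// Roughly: AddRetType/Add1ArgType/Add2ArgTypes append the call's return type
// and/or leading argument types to the intrinsic's overload type list, the
// Vectorize* flags turn those types into vectors first, and UnsignedAlts
// selects LLVMIntrinsic for unsigned element types and AltLLVMIntrinsic for
// signed ones.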
5589 
5590 namespace {
5591 struct ARMVectorIntrinsicInfo {
5592   const char *NameHint;
5593   unsigned BuiltinID;
5594   unsigned LLVMIntrinsic;
5595   unsigned AltLLVMIntrinsic;
5596   uint64_t TypeModifier;
5597 
5598   bool operator<(unsigned RHSBuiltinID) const {
5599     return BuiltinID < RHSBuiltinID;
5600   }
5601   bool operator<(const ARMVectorIntrinsicInfo &TE) const {
5602     return BuiltinID < TE.BuiltinID;
5603   }
5604 };
5605 } // end anonymous namespace
5606 
5607 #define NEONMAP0(NameBase) \
5608   { #NameBase, NEON::BI__builtin_neon_ ## NameBase, 0, 0, 0 }
5609 
5610 #define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
5611   { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
5612       Intrinsic::LLVMIntrinsic, 0, TypeModifier }
5613 
5614 #define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
5615   { #NameBase, NEON::BI__builtin_neon_ ## NameBase, \
5616       Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
5617       TypeModifier }
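// For instance, NEONMAP1(vabs_v, arm_neon_vabs, 0) expands to
//   { "vabs_v", NEON::BI__builtin_neon_vabs_v, Intrinsic::arm_neon_vabs, 0, 0 }
// i.e. a table entry mapping the builtin to its intrinsic with no modifiers.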
5618 
5619 static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap[] = {
5620   NEONMAP1(__a32_vcvt_bf16_v, arm_neon_vcvtfp2bf, 0),
5621   NEONMAP0(splat_lane_v),
5622   NEONMAP0(splat_laneq_v),
5623   NEONMAP0(splatq_lane_v),
5624   NEONMAP0(splatq_laneq_v),
5625   NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5626   NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
5627   NEONMAP1(vabs_v, arm_neon_vabs, 0),
5628   NEONMAP1(vabsq_v, arm_neon_vabs, 0),
5629   NEONMAP0(vadd_v),
5630   NEONMAP0(vaddhn_v),
5631   NEONMAP0(vaddq_v),
5632   NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
5633   NEONMAP1(vaeseq_v, arm_neon_aese, 0),
5634   NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
5635   NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
5636   NEONMAP1(vbfdot_v, arm_neon_bfdot, 0),
5637   NEONMAP1(vbfdotq_v, arm_neon_bfdot, 0),
5638   NEONMAP1(vbfmlalbq_v, arm_neon_bfmlalb, 0),
5639   NEONMAP1(vbfmlaltq_v, arm_neon_bfmlalt, 0),
5640   NEONMAP1(vbfmmlaq_v, arm_neon_bfmmla, 0),
5641   NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
5642   NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
5643   NEONMAP1(vcadd_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5644   NEONMAP1(vcadd_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5645   NEONMAP1(vcaddq_rot270_v, arm_neon_vcadd_rot270, Add1ArgType),
5646   NEONMAP1(vcaddq_rot90_v, arm_neon_vcadd_rot90, Add1ArgType),
5647   NEONMAP1(vcage_v, arm_neon_vacge, 0),
5648   NEONMAP1(vcageq_v, arm_neon_vacge, 0),
5649   NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
5650   NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
5651   NEONMAP1(vcale_v, arm_neon_vacge, 0),
5652   NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
5653   NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
5654   NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
5655   NEONMAP0(vceqz_v),
5656   NEONMAP0(vceqzq_v),
5657   NEONMAP0(vcgez_v),
5658   NEONMAP0(vcgezq_v),
5659   NEONMAP0(vcgtz_v),
5660   NEONMAP0(vcgtzq_v),
5661   NEONMAP0(vclez_v),
5662   NEONMAP0(vclezq_v),
5663   NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
5664   NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
5665   NEONMAP0(vcltz_v),
5666   NEONMAP0(vcltzq_v),
5667   NEONMAP1(vclz_v, ctlz, Add1ArgType),
5668   NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5669   NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5670   NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5671   NEONMAP1(vcvt_f16_f32, arm_neon_vcvtfp2hf, 0),
5672   NEONMAP0(vcvt_f16_v),
5673   NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
5674   NEONMAP0(vcvt_f32_v),
5675   NEONMAP2(vcvt_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5676   NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5677   NEONMAP1(vcvt_n_s16_v, arm_neon_vcvtfp2fxs, 0),
5678   NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5679   NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5680   NEONMAP1(vcvt_n_u16_v, arm_neon_vcvtfp2fxu, 0),
5681   NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5682   NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5683   NEONMAP0(vcvt_s16_v),
5684   NEONMAP0(vcvt_s32_v),
5685   NEONMAP0(vcvt_s64_v),
5686   NEONMAP0(vcvt_u16_v),
5687   NEONMAP0(vcvt_u32_v),
5688   NEONMAP0(vcvt_u64_v),
5689   NEONMAP1(vcvta_s16_v, arm_neon_vcvtas, 0),
5690   NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
5691   NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
5692   NEONMAP1(vcvta_u16_v, arm_neon_vcvtau, 0),
5693   NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
5694   NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
5695   NEONMAP1(vcvtaq_s16_v, arm_neon_vcvtas, 0),
5696   NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
5697   NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
5698   NEONMAP1(vcvtaq_u16_v, arm_neon_vcvtau, 0),
5699   NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
5700   NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
5701   NEONMAP1(vcvth_bf16_f32, arm_neon_vcvtbfp2bf, 0),
5702   NEONMAP1(vcvtm_s16_v, arm_neon_vcvtms, 0),
5703   NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
5704   NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
5705   NEONMAP1(vcvtm_u16_v, arm_neon_vcvtmu, 0),
5706   NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
5707   NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
5708   NEONMAP1(vcvtmq_s16_v, arm_neon_vcvtms, 0),
5709   NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
5710   NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
5711   NEONMAP1(vcvtmq_u16_v, arm_neon_vcvtmu, 0),
5712   NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
5713   NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
5714   NEONMAP1(vcvtn_s16_v, arm_neon_vcvtns, 0),
5715   NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
5716   NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
5717   NEONMAP1(vcvtn_u16_v, arm_neon_vcvtnu, 0),
5718   NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
5719   NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
5720   NEONMAP1(vcvtnq_s16_v, arm_neon_vcvtns, 0),
5721   NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
5722   NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
5723   NEONMAP1(vcvtnq_u16_v, arm_neon_vcvtnu, 0),
5724   NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
5725   NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
5726   NEONMAP1(vcvtp_s16_v, arm_neon_vcvtps, 0),
5727   NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
5728   NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
5729   NEONMAP1(vcvtp_u16_v, arm_neon_vcvtpu, 0),
5730   NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
5731   NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
5732   NEONMAP1(vcvtpq_s16_v, arm_neon_vcvtps, 0),
5733   NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
5734   NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
5735   NEONMAP1(vcvtpq_u16_v, arm_neon_vcvtpu, 0),
5736   NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
5737   NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
5738   NEONMAP0(vcvtq_f16_v),
5739   NEONMAP0(vcvtq_f32_v),
5740   NEONMAP2(vcvtq_n_f16_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5741   NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
5742   NEONMAP1(vcvtq_n_s16_v, arm_neon_vcvtfp2fxs, 0),
5743   NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
5744   NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
5745   NEONMAP1(vcvtq_n_u16_v, arm_neon_vcvtfp2fxu, 0),
5746   NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
5747   NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
5748   NEONMAP0(vcvtq_s16_v),
5749   NEONMAP0(vcvtq_s32_v),
5750   NEONMAP0(vcvtq_s64_v),
5751   NEONMAP0(vcvtq_u16_v),
5752   NEONMAP0(vcvtq_u32_v),
5753   NEONMAP0(vcvtq_u64_v),
5754   NEONMAP2(vdot_v, arm_neon_udot, arm_neon_sdot, 0),
5755   NEONMAP2(vdotq_v, arm_neon_udot, arm_neon_sdot, 0),
5756   NEONMAP0(vext_v),
5757   NEONMAP0(vextq_v),
5758   NEONMAP0(vfma_v),
5759   NEONMAP0(vfmaq_v),
5760   NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
5761   NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
5762   NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
5763   NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
5764   NEONMAP0(vld1_dup_v),
5765   NEONMAP1(vld1_v, arm_neon_vld1, 0),
5766   NEONMAP1(vld1_x2_v, arm_neon_vld1x2, 0),
5767   NEONMAP1(vld1_x3_v, arm_neon_vld1x3, 0),
5768   NEONMAP1(vld1_x4_v, arm_neon_vld1x4, 0),
5769   NEONMAP0(vld1q_dup_v),
5770   NEONMAP1(vld1q_v, arm_neon_vld1, 0),
5771   NEONMAP1(vld1q_x2_v, arm_neon_vld1x2, 0),
5772   NEONMAP1(vld1q_x3_v, arm_neon_vld1x3, 0),
5773   NEONMAP1(vld1q_x4_v, arm_neon_vld1x4, 0),
5774   NEONMAP1(vld2_dup_v, arm_neon_vld2dup, 0),
5775   NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
5776   NEONMAP1(vld2_v, arm_neon_vld2, 0),
5777   NEONMAP1(vld2q_dup_v, arm_neon_vld2dup, 0),
5778   NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
5779   NEONMAP1(vld2q_v, arm_neon_vld2, 0),
5780   NEONMAP1(vld3_dup_v, arm_neon_vld3dup, 0),
5781   NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
5782   NEONMAP1(vld3_v, arm_neon_vld3, 0),
5783   NEONMAP1(vld3q_dup_v, arm_neon_vld3dup, 0),
5784   NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
5785   NEONMAP1(vld3q_v, arm_neon_vld3, 0),
5786   NEONMAP1(vld4_dup_v, arm_neon_vld4dup, 0),
5787   NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
5788   NEONMAP1(vld4_v, arm_neon_vld4, 0),
5789   NEONMAP1(vld4q_dup_v, arm_neon_vld4dup, 0),
5790   NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
5791   NEONMAP1(vld4q_v, arm_neon_vld4, 0),
5792   NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
5793   NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
5794   NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
5795   NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
5796   NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
5797   NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
5798   NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
5799   NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
5800   NEONMAP2(vmmlaq_v, arm_neon_ummla, arm_neon_smmla, 0),
5801   NEONMAP0(vmovl_v),
5802   NEONMAP0(vmovn_v),
5803   NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
5804   NEONMAP0(vmull_v),
5805   NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
5806   NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
5807   NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
5808   NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
5809   NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
5810   NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
5811   NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
5812   NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
5813   NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
5814   NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
5815   NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
5816   NEONMAP2(vqadd_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
5817   NEONMAP2(vqaddq_v, uadd_sat, sadd_sat, Add1ArgType | UnsignedAlts),
5818   NEONMAP2(vqdmlal_v, arm_neon_vqdmull, sadd_sat, 0),
5819   NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, ssub_sat, 0),
5820   NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
5821   NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
5822   NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
5823   NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
5824   NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
5825   NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
5826   NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
5827   NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
5828   NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
5829   NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
5830   NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
5831   NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
5832   NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
5833   NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
5834   NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
5835   NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
5836   NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
5837   NEONMAP2(vqsub_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
5838   NEONMAP2(vqsubq_v, usub_sat, ssub_sat, Add1ArgType | UnsignedAlts),
5839   NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
5840   NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
5841   NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
5842   NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
5843   NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
5844   NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
5845   NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
5846   NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
5847   NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
5848   NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
5849   NEONMAP0(vrndi_v),
5850   NEONMAP0(vrndiq_v),
5851   NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
5852   NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
5853   NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
5854   NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
5855   NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
5856   NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
5857   NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
5858   NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
5859   NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
5860   NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
5861   NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
5862   NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
5863   NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
5864   NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5865   NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
5866   NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
5867   NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
5868   NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
5869   NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
5870   NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
5871   NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
5872   NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
5873   NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
5874   NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
5875   NEONMAP0(vshl_n_v),
5876   NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5877   NEONMAP0(vshll_n_v),
5878   NEONMAP0(vshlq_n_v),
5879   NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
5880   NEONMAP0(vshr_n_v),
5881   NEONMAP0(vshrn_n_v),
5882   NEONMAP0(vshrq_n_v),
5883   NEONMAP1(vst1_v, arm_neon_vst1, 0),
5884   NEONMAP1(vst1_x2_v, arm_neon_vst1x2, 0),
5885   NEONMAP1(vst1_x3_v, arm_neon_vst1x3, 0),
5886   NEONMAP1(vst1_x4_v, arm_neon_vst1x4, 0),
5887   NEONMAP1(vst1q_v, arm_neon_vst1, 0),
5888   NEONMAP1(vst1q_x2_v, arm_neon_vst1x2, 0),
5889   NEONMAP1(vst1q_x3_v, arm_neon_vst1x3, 0),
5890   NEONMAP1(vst1q_x4_v, arm_neon_vst1x4, 0),
5891   NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
5892   NEONMAP1(vst2_v, arm_neon_vst2, 0),
5893   NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
5894   NEONMAP1(vst2q_v, arm_neon_vst2, 0),
5895   NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
5896   NEONMAP1(vst3_v, arm_neon_vst3, 0),
5897   NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
5898   NEONMAP1(vst3q_v, arm_neon_vst3, 0),
5899   NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
5900   NEONMAP1(vst4_v, arm_neon_vst4, 0),
5901   NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
5902   NEONMAP1(vst4q_v, arm_neon_vst4, 0),
5903   NEONMAP0(vsubhn_v),
5904   NEONMAP0(vtrn_v),
5905   NEONMAP0(vtrnq_v),
5906   NEONMAP0(vtst_v),
5907   NEONMAP0(vtstq_v),
5908   NEONMAP1(vusdot_v, arm_neon_usdot, 0),
5909   NEONMAP1(vusdotq_v, arm_neon_usdot, 0),
5910   NEONMAP1(vusmmlaq_v, arm_neon_usmmla, 0),
5911   NEONMAP0(vuzp_v),
5912   NEONMAP0(vuzpq_v),
5913   NEONMAP0(vzip_v),
5914   NEONMAP0(vzipq_v)
5915 };
5916 
5917 static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
5918   NEONMAP1(__a64_vcvtq_low_bf16_v, aarch64_neon_bfcvtn, 0),
5919   NEONMAP0(splat_lane_v),
5920   NEONMAP0(splat_laneq_v),
5921   NEONMAP0(splatq_lane_v),
5922   NEONMAP0(splatq_laneq_v),
5923   NEONMAP1(vabs_v, aarch64_neon_abs, 0),
5924   NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
5925   NEONMAP0(vadd_v),
5926   NEONMAP0(vaddhn_v),
5927   NEONMAP0(vaddq_p128),
5928   NEONMAP0(vaddq_v),
5929   NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
5930   NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
5931   NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
5932   NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
5933   NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
5934   NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
5935   NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
5936   NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
5937   NEONMAP1(vbfmlaltq_v, aarch64_neon_bfmlalt, 0),
5938   NEONMAP1(vbfmmlaq_v, aarch64_neon_bfmmla, 0),
5939   NEONMAP1(vcadd_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5940   NEONMAP1(vcadd_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5941   NEONMAP1(vcaddq_rot270_v, aarch64_neon_vcadd_rot270, Add1ArgType),
5942   NEONMAP1(vcaddq_rot90_v, aarch64_neon_vcadd_rot90, Add1ArgType),
5943   NEONMAP1(vcage_v, aarch64_neon_facge, 0),
5944   NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
5945   NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
5946   NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
5947   NEONMAP1(vcale_v, aarch64_neon_facge, 0),
5948   NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
5949   NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
5950   NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
5951   NEONMAP0(vceqz_v),
5952   NEONMAP0(vceqzq_v),
5953   NEONMAP0(vcgez_v),
5954   NEONMAP0(vcgezq_v),
5955   NEONMAP0(vcgtz_v),
5956   NEONMAP0(vcgtzq_v),
5957   NEONMAP0(vclez_v),
5958   NEONMAP0(vclezq_v),
5959   NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
5960   NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
5961   NEONMAP0(vcltz_v),
5962   NEONMAP0(vcltzq_v),
5963   NEONMAP1(vclz_v, ctlz, Add1ArgType),
5964   NEONMAP1(vclzq_v, ctlz, Add1ArgType),
5965   NEONMAP1(vcmla_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
5966   NEONMAP1(vcmla_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
5967   NEONMAP1(vcmla_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
5968   NEONMAP1(vcmla_v, aarch64_neon_vcmla_rot0, Add1ArgType),
5969   NEONMAP1(vcmlaq_rot180_v, aarch64_neon_vcmla_rot180, Add1ArgType),
5970   NEONMAP1(vcmlaq_rot270_v, aarch64_neon_vcmla_rot270, Add1ArgType),
5971   NEONMAP1(vcmlaq_rot90_v, aarch64_neon_vcmla_rot90, Add1ArgType),
5972   NEONMAP1(vcmlaq_v, aarch64_neon_vcmla_rot0, Add1ArgType),
5973   NEONMAP1(vcnt_v, ctpop, Add1ArgType),
5974   NEONMAP1(vcntq_v, ctpop, Add1ArgType),
5975   NEONMAP1(vcvt_f16_f32, aarch64_neon_vcvtfp2hf, 0),
5976   NEONMAP0(vcvt_f16_v),
5977   NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
5978   NEONMAP0(vcvt_f32_v),
5979   NEONMAP2(vcvt_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5980   NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5981   NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5982   NEONMAP1(vcvt_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5983   NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5984   NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5985   NEONMAP1(vcvt_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5986   NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5987   NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
5988   NEONMAP0(vcvtq_f16_v),
5989   NEONMAP0(vcvtq_f32_v),
5990   NEONMAP1(vcvtq_high_bf16_v, aarch64_neon_bfcvtn2, 0),
5991   NEONMAP2(vcvtq_n_f16_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5992   NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5993   NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
5994   NEONMAP1(vcvtq_n_s16_v, aarch64_neon_vcvtfp2fxs, 0),
5995   NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
5996   NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
5997   NEONMAP1(vcvtq_n_u16_v, aarch64_neon_vcvtfp2fxu, 0),
5998   NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
5999   NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
6000   NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
6001   NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
6002   NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
6003   NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
6004   NEONMAP0(vext_v),
6005   NEONMAP0(vextq_v),
6006   NEONMAP0(vfma_v),
6007   NEONMAP0(vfmaq_v),
6008   NEONMAP1(vfmlal_high_v, aarch64_neon_fmlal2, 0),
6009   NEONMAP1(vfmlal_low_v, aarch64_neon_fmlal, 0),
6010   NEONMAP1(vfmlalq_high_v, aarch64_neon_fmlal2, 0),
6011   NEONMAP1(vfmlalq_low_v, aarch64_neon_fmlal, 0),
6012   NEONMAP1(vfmlsl_high_v, aarch64_neon_fmlsl2, 0),
6013   NEONMAP1(vfmlsl_low_v, aarch64_neon_fmlsl, 0),
6014   NEONMAP1(vfmlslq_high_v, aarch64_neon_fmlsl2, 0),
6015   NEONMAP1(vfmlslq_low_v, aarch64_neon_fmlsl, 0),
6016   NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
6017   NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
6018   NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
6019   NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
6020   NEONMAP1(vld1_x2_v, aarch64_neon_ld1x2, 0),
6021   NEONMAP1(vld1_x3_v, aarch64_neon_ld1x3, 0),
6022   NEONMAP1(vld1_x4_v, aarch64_neon_ld1x4, 0),
6023   NEONMAP1(vld1q_x2_v, aarch64_neon_ld1x2, 0),
6024   NEONMAP1(vld1q_x3_v, aarch64_neon_ld1x3, 0),
6025   NEONMAP1(vld1q_x4_v, aarch64_neon_ld1x4, 0),
6026   NEONMAP2(vmmlaq_v, aarch64_neon_ummla, aarch64_neon_smmla, 0),
6027   NEONMAP0(vmovl_v),
6028   NEONMAP0(vmovn_v),
6029   NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
6030   NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
6031   NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
6032   NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
6033   NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
6034   NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
6035   NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
6036   NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
6037   NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
6038   NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
6039   NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
6040   NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
6041   NEONMAP1(vqdmulh_lane_v, aarch64_neon_sqdmulh_lane, 0),
6042   NEONMAP1(vqdmulh_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
6043   NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
6044   NEONMAP1(vqdmulhq_lane_v, aarch64_neon_sqdmulh_lane, 0),
6045   NEONMAP1(vqdmulhq_laneq_v, aarch64_neon_sqdmulh_laneq, 0),
6046   NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
6047   NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
6048   NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
6049   NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
6050   NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
6051   NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
6052   NEONMAP1(vqrdmulh_lane_v, aarch64_neon_sqrdmulh_lane, 0),
6053   NEONMAP1(vqrdmulh_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
6054   NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
6055   NEONMAP1(vqrdmulhq_lane_v, aarch64_neon_sqrdmulh_lane, 0),
6056   NEONMAP1(vqrdmulhq_laneq_v, aarch64_neon_sqrdmulh_laneq, 0),
6057   NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
6058   NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
6059   NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
6060   NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
6061   NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
6063   NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
6064   NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
6065   NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
6066   NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
6067   NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
6068   NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
6069   NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0),
6070   NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
6071   NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
6072   NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
6073   NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
6074   NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
6075   NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
6076   NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType),
6077   NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType),
6078   NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType),
6079   NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType),
6080   NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType),
6081   NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType),
6082   NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType),
6083   NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType),
6084   NEONMAP0(vrndi_v),
6085   NEONMAP0(vrndiq_v),
6086   NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
6087   NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
6088   NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
6089   NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
6090   NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
6091   NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
6092   NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
6093   NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
6094   NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
6095   NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
6096   NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
6097   NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
6098   NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
6099   NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
6100   NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
6101   NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0),
6102   NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0),
6103   NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0),
6104   NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0),
6105   NEONMAP0(vshl_n_v),
6106   NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
6107   NEONMAP0(vshll_n_v),
6108   NEONMAP0(vshlq_n_v),
6109   NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
6110   NEONMAP0(vshr_n_v),
6111   NEONMAP0(vshrn_n_v),
6112   NEONMAP0(vshrq_n_v),
6113   NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0),
6114   NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0),
6115   NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0),
6116   NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0),
6117   NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0),
6118   NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0),
6119   NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0),
6120   NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0),
6121   NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0),
6122   NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
6123   NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
6124   NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
6125   NEONMAP1(vst1q_x2_v, aarch64_neon_st1x2, 0),
6126   NEONMAP1(vst1q_x3_v, aarch64_neon_st1x3, 0),
6127   NEONMAP1(vst1q_x4_v, aarch64_neon_st1x4, 0),
6128   NEONMAP0(vsubhn_v),
6129   NEONMAP0(vtst_v),
6130   NEONMAP0(vtstq_v),
6131   NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
6132   NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
6133   NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
6134   NEONMAP1(vxarq_v, aarch64_crypto_xar, 0),
6135 };
6136 
6137 static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
6138   NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
6139   NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
6140   NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
6141   NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
6142   NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
6143   NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
6144   NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
6145   NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
6146   NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
6147   NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6148   NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
6149   NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
6150   NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
6151   NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
6152   NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6153   NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6154   NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
6155   NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
6156   NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
6157   NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
6158   NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
6159   NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
6160   NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
6161   NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
6162   NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6163   NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6164   NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6165   NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6166   NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6167   NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6168   NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6169   NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6170   NEONMAP1(vcvtd_s64_f64, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6171   NEONMAP1(vcvtd_u64_f64, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6172   NEONMAP1(vcvth_bf16_f32, aarch64_neon_bfcvt, 0),
6173   NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6174   NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6175   NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6176   NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6177   NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6178   NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6179   NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6180   NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6181   NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6182   NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6183   NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6184   NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6185   NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6186   NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6187   NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6188   NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6189   NEONMAP1(vcvts_s32_f32, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6190   NEONMAP1(vcvts_u32_f32, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6191   NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
6192   NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6193   NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6194   NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6195   NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6196   NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6197   NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6198   NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6199   NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6200   NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
6201   NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
6202   NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6203   NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6204   NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6205   NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6206   NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6207   NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6208   NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6209   NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6210   NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
6211   NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
6212   NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
6213   NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
6214   NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
6215   NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6216   NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
6217   NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6218   NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
6219   NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6220   NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
6221   NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6222   NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
6223   NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
6224   NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
6225   NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6226   NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
6227   NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
6228   NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
6229   NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6230   NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6231   NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
6232   NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
6233   NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
6234   NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
6235   NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
6236   NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
6237   NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
6238   NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
6239   NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
6240   NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
6241   NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
6242   NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
6243   NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6244   NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6245   NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
6246   NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
6247   NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
6248   NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6249   NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
6250   NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6251   NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
6252   NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
6253   NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
6254   NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
6255   NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
6256   NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6257   NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6258   NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
6259   NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
6260   NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
6261   NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
6262   NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
6263   NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
6264   NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
6265   NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
6266   NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6267   NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6268   NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
6269   NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
6270   NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
6271   NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6272   NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
6273   NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6274   NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6275   NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6276   NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6277   NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
6278   NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
6279   NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6280   NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6281   NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
6282   NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
6283   NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
6284   NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
6285   NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
6286   NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
6287   NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6288   NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
6289   NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
6290   NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
6291   NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
6292   NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6293   NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6294   NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
6295   NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
6296   NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
6297   NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6298   NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
6299   NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6300   NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6301   NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
6302   NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
6303   NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
6304   NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
6305   NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
6306   NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
6307   NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
6308   NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
6309   NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
6310   NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
6311   NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
6312   NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
6313   NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
6314   NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
6315   NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
6316   NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
6317   NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
6318   NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
6319   NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
6320   NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
6321   NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
6322   NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
6323   NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
6324   NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
6325   NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6326   NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
6327   NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
6328   NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
6329   NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
6330   NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
6331   NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6332   NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
6333   NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
6334   NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType),
  // FP16 scalar intrinsics go here.
6336   NEONMAP1(vabdh_f16, aarch64_sisd_fabd, Add1ArgType),
6337   NEONMAP1(vcvtah_s32_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6338   NEONMAP1(vcvtah_s64_f16, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
6339   NEONMAP1(vcvtah_u32_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6340   NEONMAP1(vcvtah_u64_f16, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
6341   NEONMAP1(vcvth_n_f16_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6342   NEONMAP1(vcvth_n_f16_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
6343   NEONMAP1(vcvth_n_f16_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6344   NEONMAP1(vcvth_n_f16_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
6345   NEONMAP1(vcvth_n_s32_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6346   NEONMAP1(vcvth_n_s64_f16, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
6347   NEONMAP1(vcvth_n_u32_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6348   NEONMAP1(vcvth_n_u64_f16, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
6349   NEONMAP1(vcvth_s32_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6350   NEONMAP1(vcvth_s64_f16, aarch64_neon_fcvtzs, AddRetType | Add1ArgType),
6351   NEONMAP1(vcvth_u32_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6352   NEONMAP1(vcvth_u64_f16, aarch64_neon_fcvtzu, AddRetType | Add1ArgType),
6353   NEONMAP1(vcvtmh_s32_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6354   NEONMAP1(vcvtmh_s64_f16, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
6355   NEONMAP1(vcvtmh_u32_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6356   NEONMAP1(vcvtmh_u64_f16, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
6357   NEONMAP1(vcvtnh_s32_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6358   NEONMAP1(vcvtnh_s64_f16, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
6359   NEONMAP1(vcvtnh_u32_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6360   NEONMAP1(vcvtnh_u64_f16, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
6361   NEONMAP1(vcvtph_s32_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6362   NEONMAP1(vcvtph_s64_f16, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
6363   NEONMAP1(vcvtph_u32_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6364   NEONMAP1(vcvtph_u64_f16, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
6365   NEONMAP1(vmulxh_f16, aarch64_neon_fmulx, Add1ArgType),
6366   NEONMAP1(vrecpeh_f16, aarch64_neon_frecpe, Add1ArgType),
6367   NEONMAP1(vrecpxh_f16, aarch64_neon_frecpx, Add1ArgType),
6368   NEONMAP1(vrsqrteh_f16, aarch64_neon_frsqrte, Add1ArgType),
6369   NEONMAP1(vrsqrtsh_f16, aarch64_neon_frsqrts, Add1ArgType),
6370 };
6371 
6372 #undef NEONMAP0
6373 #undef NEONMAP1
6374 #undef NEONMAP2
6375 
6376 #define SVEMAP1(NameBase, LLVMIntrinsic, TypeModifier)                         \
6377   {                                                                            \
6378     #NameBase, SVE::BI__builtin_sve_##NameBase, Intrinsic::LLVMIntrinsic, 0,   \
6379         TypeModifier                                                           \
6380   }
6381 
6382 #define SVEMAP2(NameBase, TypeModifier)                                        \
6383   { #NameBase, SVE::BI__builtin_sve_##NameBase, 0, 0, TypeModifier }
6384 static const ARMVectorIntrinsicInfo AArch64SVEIntrinsicMap[] = {
6385 #define GET_SVE_LLVM_INTRINSIC_MAP
6386 #include "clang/Basic/arm_sve_builtin_cg.inc"
6387 #undef GET_SVE_LLVM_INTRINSIC_MAP
6388 };
6389 
6390 #undef SVEMAP1
6391 #undef SVEMAP2
6392 
6393 static bool NEONSIMDIntrinsicsProvenSorted = false;
6394 
6395 static bool AArch64SIMDIntrinsicsProvenSorted = false;
6396 static bool AArch64SISDIntrinsicsProvenSorted = false;
6397 static bool AArch64SVEIntrinsicsProvenSorted = false;
6398 
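/// Look up \p BuiltinID in \p IntrinsicMap via binary search. The map must be
/// sorted by BuiltinID; in assert builds that precondition is verified once
/// per map, and the result is cached through \p MapProvenSorted.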
6399 static const ARMVectorIntrinsicInfo *
6400 findARMVectorIntrinsicInMap(ArrayRef<ARMVectorIntrinsicInfo> IntrinsicMap,
6401                             unsigned BuiltinID, bool &MapProvenSorted) {
6402 
6403 #ifndef NDEBUG
6404   if (!MapProvenSorted) {
6405     assert(llvm::is_sorted(IntrinsicMap));
6406     MapProvenSorted = true;
6407   }
6408 #endif
6409 
6410   const ARMVectorIntrinsicInfo *Builtin =
6411       llvm::lower_bound(IntrinsicMap, BuiltinID);
6412 
6413   if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
6414     return Builtin;
6415 
6416   return nullptr;
6417 }
6418 
6419 Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
6420                                                    unsigned Modifier,
6421                                                    llvm::Type *ArgType,
6422                                                    const CallExpr *E) {
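  // Collect the overloaded types implied by Modifier; getIntrinsic uses them
  // below to select the concrete declaration of the (possibly overloaded)
  // intrinsic.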
6423   int VectorSize = 0;
6424   if (Modifier & Use64BitVectors)
6425     VectorSize = 64;
6426   else if (Modifier & Use128BitVectors)
6427     VectorSize = 128;
6428 
6429   // Return type.
6430   SmallVector<llvm::Type *, 3> Tys;
6431   if (Modifier & AddRetType) {
6432     llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
6433     if (Modifier & VectorizeRetType)
6434       Ty = llvm::FixedVectorType::get(
6435           Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);
6436 
6437     Tys.push_back(Ty);
6438   }
6439 
6440   // Arguments.
6441   if (Modifier & VectorizeArgTypes) {
6442     int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
6443     ArgType = llvm::FixedVectorType::get(ArgType, Elts);
6444   }
6445 
6446   if (Modifier & (Add1ArgType | Add2ArgTypes))
6447     Tys.push_back(ArgType);
6448 
6449   if (Modifier & Add2ArgTypes)
6450     Tys.push_back(ArgType);
6451 
6452   if (Modifier & InventFloatType)
6453     Tys.push_back(FloatTy);
6454 
6455   return CGM.getIntrinsic(IntrinsicID, Tys);
6456 }
6457 
6458 static Value *EmitCommonNeonSISDBuiltinExpr(
6459     CodeGenFunction &CGF, const ARMVectorIntrinsicInfo &SISDInfo,
6460     SmallVectorImpl<Value *> &Ops, const CallExpr *E) {
6461   unsigned BuiltinID = SISDInfo.BuiltinID;
  unsigned Int = SISDInfo.LLVMIntrinsic;
6463   unsigned Modifier = SISDInfo.TypeModifier;
6464   const char *s = SISDInfo.NameHint;
6465 
6466   switch (BuiltinID) {
6467   case NEON::BI__builtin_neon_vcled_s64:
6468   case NEON::BI__builtin_neon_vcled_u64:
6469   case NEON::BI__builtin_neon_vcles_f32:
6470   case NEON::BI__builtin_neon_vcled_f64:
6471   case NEON::BI__builtin_neon_vcltd_s64:
6472   case NEON::BI__builtin_neon_vcltd_u64:
6473   case NEON::BI__builtin_neon_vclts_f32:
6474   case NEON::BI__builtin_neon_vcltd_f64:
6475   case NEON::BI__builtin_neon_vcales_f32:
6476   case NEON::BI__builtin_neon_vcaled_f64:
6477   case NEON::BI__builtin_neon_vcalts_f32:
6478   case NEON::BI__builtin_neon_vcaltd_f64:
    // Only one direction of comparisons actually exists: cmle is a cmge with
    // swapped operands. The table gives us the right intrinsic, but we still
    // need to do the swap.
6482     std::swap(Ops[0], Ops[1]);
6483     break;
6484   }
6485 
6486   assert(Int && "Generic code assumes a valid intrinsic");
6487 
6488   // Determine the type(s) of this overloaded AArch64 intrinsic.
6489   const Expr *Arg = E->getArg(0);
6490   llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
6491   Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
6492 
6493   int j = 0;
6494   ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
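  // Scalar operands of the builtin may correspond to vector operands of the
  // intrinsic; promote each such scalar by inserting it into lane 0 of a
  // vector of the expected element type.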
6495   for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
6496        ai != ae; ++ai, ++j) {
6497     llvm::Type *ArgTy = ai->getType();
6498     if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
6499              ArgTy->getPrimitiveSizeInBits())
6500       continue;
6501 
6502     assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
6503     // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
6504     // it before inserting.
6505     Ops[j] = CGF.Builder.CreateTruncOrBitCast(
6506         Ops[j], cast<llvm::VectorType>(ArgTy)->getElementType());
6507     Ops[j] =
6508         CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
6509   }
6510 
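  // If the intrinsic returns a wider vector than the builtin's result type,
  // the scalar result lives in lane 0; extract it. Otherwise just bitcast.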
6511   Value *Result = CGF.EmitNeonCall(F, Ops, s);
6512   llvm::Type *ResultType = CGF.ConvertType(E->getType());
6513   if (ResultType->getPrimitiveSizeInBits().getFixedSize() <
6514       Result->getType()->getPrimitiveSizeInBits().getFixedSize())
6515     return CGF.Builder.CreateExtractElement(Result, C0);
6516 
6517   return CGF.Builder.CreateBitCast(Result, ResultType, s);
6518 }
6519 
6520 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
6521     unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
6522     const char *NameHint, unsigned Modifier, const CallExpr *E,
6523     SmallVectorImpl<llvm::Value *> &Ops, Address PtrOp0, Address PtrOp1,
6524     llvm::Triple::ArchType Arch) {
6525   // Get the last argument, which specifies the vector type.
6526   const Expr *Arg = E->getArg(E->getNumArgs() - 1);
6527   Optional<llvm::APSInt> NeonTypeConst =
6528       Arg->getIntegerConstantExpr(getContext());
6529   if (!NeonTypeConst)
6530     return nullptr;
6531 
6532   // Determine the type of this overloaded NEON intrinsic.
6533   NeonTypeFlags Type(NeonTypeConst->getZExtValue());
6534   bool Usgn = Type.isUnsigned();
6535   bool Quad = Type.isQuad();
6536   const bool HasLegalHalfType = getTarget().hasLegalHalfType();
6537   const bool AllowBFloatArgsAndRet =
6538       getTargetHooks().getABIInfo().allowBFloatArgsAndRet();
6539 
6540   llvm::FixedVectorType *VTy =
6541       GetNeonType(this, Type, HasLegalHalfType, false, AllowBFloatArgsAndRet);
6542   llvm::Type *Ty = VTy;
6543   if (!Ty)
6544     return nullptr;
6545 
6546   auto getAlignmentValue32 = [&](Address addr) -> Value* {
6547     return Builder.getInt32(addr.getAlignment().getQuantity());
6548   };
6549 
6550   unsigned Int = LLVMIntrinsic;
6551   if ((Modifier & UnsignedAlts) && !Usgn)
6552     Int = AltLLVMIntrinsic;
6553 
6554   switch (BuiltinID) {
6555   default: break;
6556   case NEON::BI__builtin_neon_splat_lane_v:
6557   case NEON::BI__builtin_neon_splat_laneq_v:
6558   case NEON::BI__builtin_neon_splatq_lane_v:
6559   case NEON::BI__builtin_neon_splatq_laneq_v: {
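    // splatq_lane splats a 64-bit input into a result with twice the element
    // count; splat_laneq takes a 128-bit input to a result with half the
    // element count. Adjust the shuffle width accordingly.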
6560     auto NumElements = VTy->getElementCount();
6561     if (BuiltinID == NEON::BI__builtin_neon_splatq_lane_v)
6562       NumElements = NumElements * 2;
6563     if (BuiltinID == NEON::BI__builtin_neon_splat_laneq_v)
6564       NumElements = NumElements.divideCoefficientBy(2);
6565 
6566     Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
6567     return EmitNeonSplat(Ops[0], cast<ConstantInt>(Ops[1]), NumElements);
6568   }
6569   case NEON::BI__builtin_neon_vpadd_v:
6570   case NEON::BI__builtin_neon_vpaddq_v:
6571     // We don't allow fp/int overloading of intrinsics.
6572     if (VTy->getElementType()->isFloatingPointTy() &&
6573         Int == Intrinsic::aarch64_neon_addp)
6574       Int = Intrinsic::aarch64_neon_faddp;
6575     break;
6576   case NEON::BI__builtin_neon_vabs_v:
6577   case NEON::BI__builtin_neon_vabsq_v:
6578     if (VTy->getElementType()->isFloatingPointTy())
6579       return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
6580     return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
6581   case NEON::BI__builtin_neon_vadd_v:
6582   case NEON::BI__builtin_neon_vaddq_v: {
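    // Only the polynomial vadd variants are routed through the builtin
    // (ordinary adds are emitted directly from the header); polynomial
    // addition in GF(2) is carry-less, i.e. a bitwise XOR.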
6583     llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
6584     Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
6585     Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
    Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
6587     return Builder.CreateBitCast(Ops[0], Ty);
6588   }
6589   case NEON::BI__builtin_neon_vaddhn_v: {
6590     llvm::FixedVectorType *SrcTy =
6591         llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6592 
6593     // %sum = add <4 x i32> %lhs, %rhs
6594     Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6595     Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
6596     Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
6597 
6598     // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
6599     Constant *ShiftAmt =
6600         ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
6601     Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
6602 
6603     // %res = trunc <4 x i32> %high to <4 x i16>
6604     return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
6605   }
6606   case NEON::BI__builtin_neon_vcale_v:
6607   case NEON::BI__builtin_neon_vcaleq_v:
6608   case NEON::BI__builtin_neon_vcalt_v:
6609   case NEON::BI__builtin_neon_vcaltq_v:
6610     std::swap(Ops[0], Ops[1]);
6611     LLVM_FALLTHROUGH;
6612   case NEON::BI__builtin_neon_vcage_v:
6613   case NEON::BI__builtin_neon_vcageq_v:
6614   case NEON::BI__builtin_neon_vcagt_v:
6615   case NEON::BI__builtin_neon_vcagtq_v: {
6616     llvm::Type *Ty;
6617     switch (VTy->getScalarSizeInBits()) {
6618     default: llvm_unreachable("unexpected type");
6619     case 32:
6620       Ty = FloatTy;
6621       break;
6622     case 64:
6623       Ty = DoubleTy;
6624       break;
6625     case 16:
6626       Ty = HalfTy;
6627       break;
6628     }
6629     auto *VecFlt = llvm::FixedVectorType::get(Ty, VTy->getNumElements());
6630     llvm::Type *Tys[] = { VTy, VecFlt };
6631     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6632     return EmitNeonCall(F, Ops, NameHint);
6633   }
6634   case NEON::BI__builtin_neon_vceqz_v:
6635   case NEON::BI__builtin_neon_vceqzq_v:
6636     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
6637                                          ICmpInst::ICMP_EQ, "vceqz");
6638   case NEON::BI__builtin_neon_vcgez_v:
6639   case NEON::BI__builtin_neon_vcgezq_v:
6640     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
6641                                          ICmpInst::ICMP_SGE, "vcgez");
6642   case NEON::BI__builtin_neon_vclez_v:
6643   case NEON::BI__builtin_neon_vclezq_v:
6644     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
6645                                          ICmpInst::ICMP_SLE, "vclez");
6646   case NEON::BI__builtin_neon_vcgtz_v:
6647   case NEON::BI__builtin_neon_vcgtzq_v:
6648     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
6649                                          ICmpInst::ICMP_SGT, "vcgtz");
6650   case NEON::BI__builtin_neon_vcltz_v:
6651   case NEON::BI__builtin_neon_vcltzq_v:
6652     return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
6653                                          ICmpInst::ICMP_SLT, "vcltz");
6654   case NEON::BI__builtin_neon_vclz_v:
6655   case NEON::BI__builtin_neon_vclzq_v:
    // We generate a target-independent intrinsic, which needs a second
    // argument indicating whether clz of zero is undefined; on ARM it isn't.
6658     Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
6659     break;
6660   case NEON::BI__builtin_neon_vcvt_f32_v:
6661   case NEON::BI__builtin_neon_vcvtq_f32_v:
6662     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6663     Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad),
6664                      HasLegalHalfType);
6665     return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
6666                 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
6667   case NEON::BI__builtin_neon_vcvt_f16_v:
6668   case NEON::BI__builtin_neon_vcvtq_f16_v:
6669     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6670     Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float16, false, Quad),
6671                      HasLegalHalfType);
6672     return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
6673                 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
6674   case NEON::BI__builtin_neon_vcvt_n_f16_v:
6675   case NEON::BI__builtin_neon_vcvt_n_f32_v:
6676   case NEON::BI__builtin_neon_vcvt_n_f64_v:
6677   case NEON::BI__builtin_neon_vcvtq_n_f16_v:
6678   case NEON::BI__builtin_neon_vcvtq_n_f32_v:
6679   case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
6680     llvm::Type *Tys[2] = { GetFloatNeonType(this, Type), Ty };
6681     Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
6682     Function *F = CGM.getIntrinsic(Int, Tys);
6683     return EmitNeonCall(F, Ops, "vcvt_n");
6684   }
6685   case NEON::BI__builtin_neon_vcvt_n_s16_v:
6686   case NEON::BI__builtin_neon_vcvt_n_s32_v:
6687   case NEON::BI__builtin_neon_vcvt_n_u16_v:
6688   case NEON::BI__builtin_neon_vcvt_n_u32_v:
6689   case NEON::BI__builtin_neon_vcvt_n_s64_v:
6690   case NEON::BI__builtin_neon_vcvt_n_u64_v:
6691   case NEON::BI__builtin_neon_vcvtq_n_s16_v:
6692   case NEON::BI__builtin_neon_vcvtq_n_s32_v:
6693   case NEON::BI__builtin_neon_vcvtq_n_u16_v:
6694   case NEON::BI__builtin_neon_vcvtq_n_u32_v:
6695   case NEON::BI__builtin_neon_vcvtq_n_s64_v:
6696   case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
6697     llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
6698     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6699     return EmitNeonCall(F, Ops, "vcvt_n");
6700   }
6701   case NEON::BI__builtin_neon_vcvt_s32_v:
6702   case NEON::BI__builtin_neon_vcvt_u32_v:
6703   case NEON::BI__builtin_neon_vcvt_s64_v:
6704   case NEON::BI__builtin_neon_vcvt_u64_v:
6705   case NEON::BI__builtin_neon_vcvt_s16_v:
6706   case NEON::BI__builtin_neon_vcvt_u16_v:
6707   case NEON::BI__builtin_neon_vcvtq_s32_v:
6708   case NEON::BI__builtin_neon_vcvtq_u32_v:
6709   case NEON::BI__builtin_neon_vcvtq_s64_v:
6710   case NEON::BI__builtin_neon_vcvtq_u64_v:
6711   case NEON::BI__builtin_neon_vcvtq_s16_v:
6712   case NEON::BI__builtin_neon_vcvtq_u16_v: {
6713     Ops[0] = Builder.CreateBitCast(Ops[0], GetFloatNeonType(this, Type));
6714     return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
6715                 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
6716   }
6717   case NEON::BI__builtin_neon_vcvta_s16_v:
6718   case NEON::BI__builtin_neon_vcvta_s32_v:
6719   case NEON::BI__builtin_neon_vcvta_s64_v:
6720   case NEON::BI__builtin_neon_vcvta_u16_v:
6721   case NEON::BI__builtin_neon_vcvta_u32_v:
6722   case NEON::BI__builtin_neon_vcvta_u64_v:
6723   case NEON::BI__builtin_neon_vcvtaq_s16_v:
6724   case NEON::BI__builtin_neon_vcvtaq_s32_v:
6725   case NEON::BI__builtin_neon_vcvtaq_s64_v:
6726   case NEON::BI__builtin_neon_vcvtaq_u16_v:
6727   case NEON::BI__builtin_neon_vcvtaq_u32_v:
6728   case NEON::BI__builtin_neon_vcvtaq_u64_v:
6729   case NEON::BI__builtin_neon_vcvtn_s16_v:
6730   case NEON::BI__builtin_neon_vcvtn_s32_v:
6731   case NEON::BI__builtin_neon_vcvtn_s64_v:
6732   case NEON::BI__builtin_neon_vcvtn_u16_v:
6733   case NEON::BI__builtin_neon_vcvtn_u32_v:
6734   case NEON::BI__builtin_neon_vcvtn_u64_v:
6735   case NEON::BI__builtin_neon_vcvtnq_s16_v:
6736   case NEON::BI__builtin_neon_vcvtnq_s32_v:
6737   case NEON::BI__builtin_neon_vcvtnq_s64_v:
6738   case NEON::BI__builtin_neon_vcvtnq_u16_v:
6739   case NEON::BI__builtin_neon_vcvtnq_u32_v:
6740   case NEON::BI__builtin_neon_vcvtnq_u64_v:
6741   case NEON::BI__builtin_neon_vcvtp_s16_v:
6742   case NEON::BI__builtin_neon_vcvtp_s32_v:
6743   case NEON::BI__builtin_neon_vcvtp_s64_v:
6744   case NEON::BI__builtin_neon_vcvtp_u16_v:
6745   case NEON::BI__builtin_neon_vcvtp_u32_v:
6746   case NEON::BI__builtin_neon_vcvtp_u64_v:
6747   case NEON::BI__builtin_neon_vcvtpq_s16_v:
6748   case NEON::BI__builtin_neon_vcvtpq_s32_v:
6749   case NEON::BI__builtin_neon_vcvtpq_s64_v:
6750   case NEON::BI__builtin_neon_vcvtpq_u16_v:
6751   case NEON::BI__builtin_neon_vcvtpq_u32_v:
6752   case NEON::BI__builtin_neon_vcvtpq_u64_v:
6753   case NEON::BI__builtin_neon_vcvtm_s16_v:
6754   case NEON::BI__builtin_neon_vcvtm_s32_v:
6755   case NEON::BI__builtin_neon_vcvtm_s64_v:
6756   case NEON::BI__builtin_neon_vcvtm_u16_v:
6757   case NEON::BI__builtin_neon_vcvtm_u32_v:
6758   case NEON::BI__builtin_neon_vcvtm_u64_v:
6759   case NEON::BI__builtin_neon_vcvtmq_s16_v:
6760   case NEON::BI__builtin_neon_vcvtmq_s32_v:
6761   case NEON::BI__builtin_neon_vcvtmq_s64_v:
6762   case NEON::BI__builtin_neon_vcvtmq_u16_v:
6763   case NEON::BI__builtin_neon_vcvtmq_u32_v:
6764   case NEON::BI__builtin_neon_vcvtmq_u64_v: {
6765     llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
6766     return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
6767   }
6768   case NEON::BI__builtin_neon_vcvtx_f32_v: {
    llvm::Type *Tys[2] = { VTy->getTruncatedElementVectorType(VTy), Ty };
    return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
  }
6773   case NEON::BI__builtin_neon_vext_v:
6774   case NEON::BI__builtin_neon_vextq_v: {
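    // vext extracts VTy->getNumElements() consecutive elements from the
    // concatenation of the two inputs, starting at the immediate index CV;
    // model it as a shuffle with indices CV, CV+1, ...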
6775     int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
6776     SmallVector<int, 16> Indices;
6777     for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
6778       Indices.push_back(i+CV);
6779 
6780     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6781     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6782     return Builder.CreateShuffleVector(Ops[0], Ops[1], Indices, "vext");
6783   }
6784   case NEON::BI__builtin_neon_vfma_v:
6785   case NEON::BI__builtin_neon_vfmaq_v: {
6786     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6787     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
6788     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
6789 
    // The NEON intrinsic puts the accumulator first, unlike LLVM's fma.
6791     return emitCallMaybeConstrainedFPBuiltin(
6792         *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
6793         {Ops[1], Ops[2], Ops[0]});
6794   }
6795   case NEON::BI__builtin_neon_vld1_v:
6796   case NEON::BI__builtin_neon_vld1q_v: {
6797     llvm::Type *Tys[] = {Ty, Int8PtrTy};
6798     Ops.push_back(getAlignmentValue32(PtrOp0));
6799     return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "vld1");
6800   }
6801   case NEON::BI__builtin_neon_vld1_x2_v:
6802   case NEON::BI__builtin_neon_vld1q_x2_v:
6803   case NEON::BI__builtin_neon_vld1_x3_v:
6804   case NEON::BI__builtin_neon_vld1q_x3_v:
6805   case NEON::BI__builtin_neon_vld1_x4_v:
6806   case NEON::BI__builtin_neon_vld1q_x4_v: {
6807     llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
6808     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
6809     llvm::Type *Tys[2] = { VTy, PTy };
6810     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6811     Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
6812     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6813     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6814     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6815   }
6816   case NEON::BI__builtin_neon_vld2_v:
6817   case NEON::BI__builtin_neon_vld2q_v:
6818   case NEON::BI__builtin_neon_vld3_v:
6819   case NEON::BI__builtin_neon_vld3q_v:
6820   case NEON::BI__builtin_neon_vld4_v:
6821   case NEON::BI__builtin_neon_vld4q_v:
6822   case NEON::BI__builtin_neon_vld2_dup_v:
6823   case NEON::BI__builtin_neon_vld2q_dup_v:
6824   case NEON::BI__builtin_neon_vld3_dup_v:
6825   case NEON::BI__builtin_neon_vld3q_dup_v:
6826   case NEON::BI__builtin_neon_vld4_dup_v:
6827   case NEON::BI__builtin_neon_vld4q_dup_v: {
6828     llvm::Type *Tys[] = {Ty, Int8PtrTy};
6829     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6830     Value *Align = getAlignmentValue32(PtrOp1);
6831     Ops[1] = Builder.CreateCall(F, {Ops[1], Align}, NameHint);
6832     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6833     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6834     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6835   }
6836   case NEON::BI__builtin_neon_vld1_dup_v:
6837   case NEON::BI__builtin_neon_vld1q_dup_v: {
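    // vld1_dup loads a single element and splats it across the vector: load
    // the scalar, insert it into lane 0 of an undef vector, then splat lane 0.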
6838     Value *V = UndefValue::get(Ty);
6839     Ty = llvm::PointerType::getUnqual(VTy->getElementType());
6840     PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
6841     LoadInst *Ld = Builder.CreateLoad(PtrOp0);
6842     llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
6843     Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
6844     return EmitNeonSplat(Ops[0], CI);
6845   }
6846   case NEON::BI__builtin_neon_vld2_lane_v:
6847   case NEON::BI__builtin_neon_vld2q_lane_v:
6848   case NEON::BI__builtin_neon_vld3_lane_v:
6849   case NEON::BI__builtin_neon_vld3q_lane_v:
6850   case NEON::BI__builtin_neon_vld4_lane_v:
6851   case NEON::BI__builtin_neon_vld4q_lane_v: {
6852     llvm::Type *Tys[] = {Ty, Int8PtrTy};
6853     Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
6854     for (unsigned I = 2; I < Ops.size() - 1; ++I)
6855       Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
6856     Ops.push_back(getAlignmentValue32(PtrOp1));
6857     Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
6858     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
6859     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
6860     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
6861   }
6862   case NEON::BI__builtin_neon_vmovl_v: {
6863     llvm::FixedVectorType *DTy =
6864         llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6865     Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
6866     if (Usgn)
6867       return Builder.CreateZExt(Ops[0], Ty, "vmovl");
6868     return Builder.CreateSExt(Ops[0], Ty, "vmovl");
6869   }
6870   case NEON::BI__builtin_neon_vmovn_v: {
6871     llvm::FixedVectorType *QTy =
6872         llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6873     Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
6874     return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
6875   }
6876   case NEON::BI__builtin_neon_vmull_v:
    // FIXME: the integer vmull operations could be emitted in terms of pure
    // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
    // hoisting the exts outside loops. Until global ISel comes along and can
    // see through such movement, this leads to bad CodeGen, so we need an
    // intrinsic for now.
6882     Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
6883     Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
6884     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
6885   case NEON::BI__builtin_neon_vpadal_v:
6886   case NEON::BI__builtin_neon_vpadalq_v: {
6887     // The source operand type has twice as many elements of half the size.
6888     unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
    llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6891     auto *NarrowTy =
6892         llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6893     llvm::Type *Tys[2] = { Ty, NarrowTy };
6894     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6895   }
6896   case NEON::BI__builtin_neon_vpaddl_v:
6897   case NEON::BI__builtin_neon_vpaddlq_v: {
6898     // The source operand type has twice as many elements of half the size.
6899     unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
6900     llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
6901     auto *NarrowTy =
6902         llvm::FixedVectorType::get(EltTy, VTy->getNumElements() * 2);
6903     llvm::Type *Tys[2] = { Ty, NarrowTy };
6904     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
6905   }
6906   case NEON::BI__builtin_neon_vqdmlal_v:
6907   case NEON::BI__builtin_neon_vqdmlsl_v: {
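    // vqdmlal/vqdmlsl combine a saturating doubling multiply (the primary
    // intrinsic) on the last two operands with a saturating add/sub (the
    // alternate intrinsic) against the accumulator in Ops[0].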
6908     SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
6909     Ops[1] =
6910         EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), MulOps, "vqdmlal");
6911     Ops.resize(2);
6912     return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty), Ops, NameHint);
6913   }
6914   case NEON::BI__builtin_neon_vqdmulhq_lane_v:
6915   case NEON::BI__builtin_neon_vqdmulh_lane_v:
6916   case NEON::BI__builtin_neon_vqrdmulhq_lane_v:
6917   case NEON::BI__builtin_neon_vqrdmulh_lane_v: {
6918     auto *RTy = cast<llvm::FixedVectorType>(Ty);
6919     if (BuiltinID == NEON::BI__builtin_neon_vqdmulhq_lane_v ||
6920         BuiltinID == NEON::BI__builtin_neon_vqrdmulhq_lane_v)
6921       RTy = llvm::FixedVectorType::get(RTy->getElementType(),
6922                                        RTy->getNumElements() * 2);
6923     llvm::Type *Tys[2] = {
6924         RTy, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6925                                              /*isQuad*/ false))};
6926     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6927   }
6928   case NEON::BI__builtin_neon_vqdmulhq_laneq_v:
6929   case NEON::BI__builtin_neon_vqdmulh_laneq_v:
6930   case NEON::BI__builtin_neon_vqrdmulhq_laneq_v:
6931   case NEON::BI__builtin_neon_vqrdmulh_laneq_v: {
6932     llvm::Type *Tys[2] = {
6933         Ty, GetNeonType(this, NeonTypeFlags(Type.getEltType(), false,
6934                                             /*isQuad*/ true))};
6935     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
6936   }
6937   case NEON::BI__builtin_neon_vqshl_n_v:
6938   case NEON::BI__builtin_neon_vqshlq_n_v:
6939     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
6940                         1, false);
6941   case NEON::BI__builtin_neon_vqshlu_n_v:
6942   case NEON::BI__builtin_neon_vqshluq_n_v:
6943     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
6944                         1, false);
6945   case NEON::BI__builtin_neon_vrecpe_v:
6946   case NEON::BI__builtin_neon_vrecpeq_v:
6947   case NEON::BI__builtin_neon_vrsqrte_v:
6948   case NEON::BI__builtin_neon_vrsqrteq_v:
6949     Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
6950     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6951   case NEON::BI__builtin_neon_vrndi_v:
6952   case NEON::BI__builtin_neon_vrndiq_v:
6953     Int = Builder.getIsFPConstrained()
6954               ? Intrinsic::experimental_constrained_nearbyint
6955               : Intrinsic::nearbyint;
6956     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
6957   case NEON::BI__builtin_neon_vrshr_n_v:
6958   case NEON::BI__builtin_neon_vrshrq_n_v:
6959     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
6960                         1, true);
6961   case NEON::BI__builtin_neon_vsha512hq_v:
6962   case NEON::BI__builtin_neon_vsha512h2q_v:
6963   case NEON::BI__builtin_neon_vsha512su0q_v:
6964   case NEON::BI__builtin_neon_vsha512su1q_v: {
6965     Function *F = CGM.getIntrinsic(Int);
6966     return EmitNeonCall(F, Ops, "");
6967   }
6968   case NEON::BI__builtin_neon_vshl_n_v:
6969   case NEON::BI__builtin_neon_vshlq_n_v:
6970     Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
6971     return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
6972                              "vshl_n");
6973   case NEON::BI__builtin_neon_vshll_n_v: {
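    // vshll_n widens each element to twice its width and then shifts left by
    // the immediate: emit a zext/sext from the narrow source type followed by
    // a shl.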
6974     llvm::FixedVectorType *SrcTy =
6975         llvm::FixedVectorType::getTruncatedElementVectorType(VTy);
6976     Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6977     if (Usgn)
6978       Ops[0] = Builder.CreateZExt(Ops[0], VTy);
6979     else
6980       Ops[0] = Builder.CreateSExt(Ops[0], VTy);
6981     Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
6982     return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
6983   }
6984   case NEON::BI__builtin_neon_vshrn_n_v: {
6985     llvm::FixedVectorType *SrcTy =
6986         llvm::FixedVectorType::getExtendedElementVectorType(VTy);
6987     Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
6988     Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
6989     if (Usgn)
6990       Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
6991     else
6992       Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
6993     return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
6994   }
6995   case NEON::BI__builtin_neon_vshr_n_v:
6996   case NEON::BI__builtin_neon_vshrq_n_v:
6997     return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
6998   case NEON::BI__builtin_neon_vst1_v:
6999   case NEON::BI__builtin_neon_vst1q_v:
7000   case NEON::BI__builtin_neon_vst2_v:
7001   case NEON::BI__builtin_neon_vst2q_v:
7002   case NEON::BI__builtin_neon_vst3_v:
7003   case NEON::BI__builtin_neon_vst3q_v:
7004   case NEON::BI__builtin_neon_vst4_v:
7005   case NEON::BI__builtin_neon_vst4q_v:
7006   case NEON::BI__builtin_neon_vst2_lane_v:
7007   case NEON::BI__builtin_neon_vst2q_lane_v:
7008   case NEON::BI__builtin_neon_vst3_lane_v:
7009   case NEON::BI__builtin_neon_vst3q_lane_v:
7010   case NEON::BI__builtin_neon_vst4_lane_v:
7011   case NEON::BI__builtin_neon_vst4q_lane_v: {
7012     llvm::Type *Tys[] = {Int8PtrTy, Ty};
7013     Ops.push_back(getAlignmentValue32(PtrOp0));
7014     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
7015   }
7016   case NEON::BI__builtin_neon_vsm3partw1q_v:
7017   case NEON::BI__builtin_neon_vsm3partw2q_v:
7018   case NEON::BI__builtin_neon_vsm3ss1q_v:
7019   case NEON::BI__builtin_neon_vsm4ekeyq_v:
7020   case NEON::BI__builtin_neon_vsm4eq_v: {
7021     Function *F = CGM.getIntrinsic(Int);
7022     return EmitNeonCall(F, Ops, "");
7023   }
7024   case NEON::BI__builtin_neon_vsm3tt1aq_v:
7025   case NEON::BI__builtin_neon_vsm3tt1bq_v:
7026   case NEON::BI__builtin_neon_vsm3tt2aq_v:
7027   case NEON::BI__builtin_neon_vsm3tt2bq_v: {
7028     Function *F = CGM.getIntrinsic(Int);
7029     Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
7030     return EmitNeonCall(F, Ops, "");
7031   }
7032   case NEON::BI__builtin_neon_vst1_x2_v:
7033   case NEON::BI__builtin_neon_vst1q_x2_v:
7034   case NEON::BI__builtin_neon_vst1_x3_v:
7035   case NEON::BI__builtin_neon_vst1q_x3_v:
7036   case NEON::BI__builtin_neon_vst1_x4_v:
7037   case NEON::BI__builtin_neon_vst1q_x4_v: {
7038     llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getElementType());
    // TODO: Currently in AArch32 mode the pointer operand comes first, whereas
    // in AArch64 it comes last. We may want to stick to one or the other.
7041     if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_be ||
7042         Arch == llvm::Triple::aarch64_32) {
7043       llvm::Type *Tys[2] = { VTy, PTy };
7044       std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
7045       return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
7046     }
7047     llvm::Type *Tys[2] = { PTy, VTy };
7048     return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, "");
7049   }
7050   case NEON::BI__builtin_neon_vsubhn_v: {
7051     llvm::FixedVectorType *SrcTy =
7052         llvm::FixedVectorType::getExtendedElementVectorType(VTy);
7053 
    // %diff = sub <4 x i32> %lhs, %rhs
7055     Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
7056     Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
7057     Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
7058 
    // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
7060     Constant *ShiftAmt =
7061         ConstantInt::get(SrcTy, SrcTy->getScalarSizeInBits() / 2);
7062     Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
7063 
7064     // %res = trunc <4 x i32> %high to <4 x i16>
7065     return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
7066   }
7067   case NEON::BI__builtin_neon_vtrn_v:
7068   case NEON::BI__builtin_neon_vtrnq_v: {
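    // vtrn writes both of its results through the pointer in Ops[0]: pass 0
    // interleaves the even lanes of the two inputs, pass 1 the odd lanes.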
7069     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
7070     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7071     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7072     Value *SV = nullptr;
7073 
7074     for (unsigned vi = 0; vi != 2; ++vi) {
7075       SmallVector<int, 16> Indices;
7076       for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
7077         Indices.push_back(i+vi);
7078         Indices.push_back(i+e+vi);
7079       }
7080       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7081       SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
7082       SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7083     }
7084     return SV;
7085   }
7086   case NEON::BI__builtin_neon_vtst_v:
7087   case NEON::BI__builtin_neon_vtstq_v: {
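    // vtst sets each result lane to all-ones when (a & b) has any bit set:
    // AND the inputs, compare against zero, and sign-extend the i1 mask back
    // to the full element width.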
7088     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
7089     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7090     Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
7091     Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
7092                                 ConstantAggregateZero::get(Ty));
7093     return Builder.CreateSExt(Ops[0], Ty, "vtst");
7094   }
7095   case NEON::BI__builtin_neon_vuzp_v:
7096   case NEON::BI__builtin_neon_vuzpq_v: {
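    // vuzp de-interleaves and writes both results through the pointer in
    // Ops[0]: pass 0 gathers the even lanes of the concatenated inputs,
    // pass 1 the odd lanes.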
7097     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
7098     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7099     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7100     Value *SV = nullptr;
7101 
7102     for (unsigned vi = 0; vi != 2; ++vi) {
7103       SmallVector<int, 16> Indices;
7104       for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
7105         Indices.push_back(2*i+vi);
7106 
7107       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7108       SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
7109       SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7110     }
7111     return SV;
7112   }
7113   case NEON::BI__builtin_neon_vxarq_v: {
7114     Function *F = CGM.getIntrinsic(Int);
7115     Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
7116     return EmitNeonCall(F, Ops, "");
7117   }
7118   case NEON::BI__builtin_neon_vzip_v:
7119   case NEON::BI__builtin_neon_vzipq_v: {
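    // vzip interleaves and writes both results through the pointer in
    // Ops[0]: pass 0 zips the low halves of the two inputs, pass 1 the high
    // halves.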
7120     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
7121     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7122     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
7123     Value *SV = nullptr;
7124 
7125     for (unsigned vi = 0; vi != 2; ++vi) {
7126       SmallVector<int, 16> Indices;
7127       for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
7128         Indices.push_back((i + vi*e) >> 1);
7129         Indices.push_back(((i + vi*e) >> 1)+e);
7130       }
7131       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
7132       SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
7133       SV = Builder.CreateDefaultAlignedStore(SV, Addr);
7134     }
7135     return SV;
7136   }
7137   case NEON::BI__builtin_neon_vdot_v:
7138   case NEON::BI__builtin_neon_vdotq_v: {
7139     auto *InputTy =
7140         llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7141     llvm::Type *Tys[2] = { Ty, InputTy };
7142     Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
7143     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vdot");
7144   }
7145   case NEON::BI__builtin_neon_vfmlal_low_v:
7146   case NEON::BI__builtin_neon_vfmlalq_low_v: {
7147     auto *InputTy =
7148         llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7149     llvm::Type *Tys[2] = { Ty, InputTy };
7150     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_low");
7151   }
7152   case NEON::BI__builtin_neon_vfmlsl_low_v:
7153   case NEON::BI__builtin_neon_vfmlslq_low_v: {
7154     auto *InputTy =
7155         llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7156     llvm::Type *Tys[2] = { Ty, InputTy };
7157     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_low");
7158   }
7159   case NEON::BI__builtin_neon_vfmlal_high_v:
7160   case NEON::BI__builtin_neon_vfmlalq_high_v: {
7161     auto *InputTy =
7162         llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7163     llvm::Type *Tys[2] = { Ty, InputTy };
7164     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlal_high");
7165   }
7166   case NEON::BI__builtin_neon_vfmlsl_high_v:
7167   case NEON::BI__builtin_neon_vfmlslq_high_v: {
7168     auto *InputTy =
7169         llvm::FixedVectorType::get(HalfTy, Ty->getPrimitiveSizeInBits() / 16);
7170     llvm::Type *Tys[2] = { Ty, InputTy };
7171     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vfmlsl_high");
7172   }
7173   case NEON::BI__builtin_neon_vmmlaq_v: {
7174     auto *InputTy =
7175         llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7176     llvm::Type *Tys[2] = { Ty, InputTy };
7177     Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
7178     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmmla");
7179   }
7180   case NEON::BI__builtin_neon_vusmmlaq_v: {
7181     auto *InputTy =
7182         llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7183     llvm::Type *Tys[2] = { Ty, InputTy };
7184     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusmmla");
7185   }
7186   case NEON::BI__builtin_neon_vusdot_v:
7187   case NEON::BI__builtin_neon_vusdotq_v: {
7188     auto *InputTy =
7189         llvm::FixedVectorType::get(Int8Ty, Ty->getPrimitiveSizeInBits() / 8);
7190     llvm::Type *Tys[2] = { Ty, InputTy };
7191     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vusdot");
7192   }
7193   case NEON::BI__builtin_neon_vbfdot_v:
7194   case NEON::BI__builtin_neon_vbfdotq_v: {
7195     llvm::Type *InputTy =
7196         llvm::FixedVectorType::get(BFloatTy, Ty->getPrimitiveSizeInBits() / 16);
7197     llvm::Type *Tys[2] = { Ty, InputTy };
7198     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vbfdot");
7199   }
7200   case NEON::BI__builtin_neon___a32_vcvt_bf16_v: {
7201     llvm::Type *Tys[1] = { Ty };
7202     Function *F = CGM.getIntrinsic(Int, Tys);
7203     return EmitNeonCall(F, Ops, "vcvtfp2bf");
7204   }
7205 
7206   }
7207 
7208   assert(Int && "Expected valid intrinsic number");
7209 
7210   // Determine the type(s) of this overloaded AArch64 intrinsic.
7211   Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
7212 
7213   Value *Result = EmitNeonCall(F, Ops, NameHint);
7214   llvm::Type *ResultType = ConvertType(E->getType());
  // Cast the AArch64 intrinsic's one-element vector result back to the
  // scalar type the builtin is expected to return.
7217   return Builder.CreateBitCast(Result, ResultType, NameHint);
7218 }
7219 
7220 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
7221     Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
7222     const CmpInst::Predicate Ip, const Twine &Name) {
7223   llvm::Type *OTy = Op->getType();
7224 
7225   // FIXME: this is utterly horrific. We should not be looking at previous
7226   // codegen context to find out what needs doing. Unfortunately TableGen
7227   // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
7228   // (etc).
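  //
  // For example, vceqz_f32 arrives here with Fp == FCMP_OEQ and emits
  // (roughly):
  //   %cmp = fcmp oeq <2 x float> %a, zeroinitializer
  //   %res = sext <2 x i1> %cmp to <2 x i32>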
7229   if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
7230     OTy = BI->getOperand(0)->getType();
7231 
7232   Op = Builder.CreateBitCast(Op, OTy);
7233   if (OTy->getScalarType()->isFloatingPointTy()) {
7234     Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
7235   } else {
7236     Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
7237   }
7238   return Builder.CreateSExt(Op, Ty, Name);
7239 }
7240 
7241 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
7242                                  Value *ExtOp, Value *IndexOp,
7243                                  llvm::Type *ResTy, unsigned IntID,
7244                                  const char *Name) {
7245   SmallVector<Value *, 2> TblOps;
7246   if (ExtOp)
7247     TblOps.push_back(ExtOp);
7248 
  // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
7250   SmallVector<int, 16> Indices;
7251   auto *TblTy = cast<llvm::FixedVectorType>(Ops[0]->getType());
7252   for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
7253     Indices.push_back(2*i);
7254     Indices.push_back(2*i+1);
7255   }
7256 
7257   int PairPos = 0, End = Ops.size() - 1;
7258   while (PairPos < End) {
7259     TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7260                                                      Ops[PairPos+1], Indices,
7261                                                      Name));
7262     PairPos += 2;
7263   }
7264 
  // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
  // of the final 128-bit lookup table with zeroes.
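  // (For example, vtbl3 supplies three 64-bit table registers: the first two
  // are concatenated by the loop above, and the remaining one is paired with
  // a zero vector here to form the second 128-bit table.)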
7267   if (PairPos == End) {
7268     Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
7269     TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
7270                                                      ZeroTbl, Indices, Name));
7271   }
7272 
7273   Function *TblF;
7274   TblOps.push_back(IndexOp);
7275   TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
7276 
7277   return CGF.EmitNeonCall(TblF, TblOps, Name);
7278 }
7279 
7280 Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
7281   unsigned Value;
7282   switch (BuiltinID) {
7283   default:
7284     return nullptr;
7285   case ARM::BI__builtin_arm_nop:
7286     Value = 0;
7287     break;
7288   case ARM::BI__builtin_arm_yield:
7289   case ARM::BI__yield:
7290     Value = 1;
7291     break;
7292   case ARM::BI__builtin_arm_wfe:
7293   case ARM::BI__wfe:
7294     Value = 2;
7295     break;
7296   case ARM::BI__builtin_arm_wfi:
7297   case ARM::BI__wfi:
7298     Value = 3;
7299     break;
7300   case ARM::BI__builtin_arm_sev:
7301   case ARM::BI__sev:
7302     Value = 4;
7303     break;
7304   case ARM::BI__builtin_arm_sevl:
7305   case ARM::BI__sevl:
7306     Value = 5;
7307     break;
7308   }
7309 
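  // The selected value is the immediate operand of @llvm.arm.hint; e.g.
  // __wfi() lowers to "call void @llvm.arm.hint(i32 3)".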
7310   return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
7311                             llvm::ConstantInt::get(Int32Ty, Value));
7312 }
7313 
7314 enum SpecialRegisterAccessKind {
7315   NormalRead,
7316   VolatileRead,
7317   Write,
7318 };
7319 
// Generates the IR for the read/write special register builtin.
// ValueType is the type of the value that is to be written or read;
// RegisterType is the type of the register being written to or read from.
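// For example, __builtin_arm_rsr("cpsr") reaches this function with
// AccessKind == VolatileRead and emits a call to
// @llvm.read_volatile_register.i32 whose metadata operand names "cpsr".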
7323 static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
7324                                          const CallExpr *E,
7325                                          llvm::Type *RegisterType,
7326                                          llvm::Type *ValueType,
7327                                          SpecialRegisterAccessKind AccessKind,
7328                                          StringRef SysReg = "") {
  // Read and write register intrinsics only support 32- and 64-bit operations.
7330   assert((RegisterType->isIntegerTy(32) || RegisterType->isIntegerTy(64))
7331           && "Unsupported size for register.");
7332 
7333   CodeGen::CGBuilderTy &Builder = CGF.Builder;
7334   CodeGen::CodeGenModule &CGM = CGF.CGM;
7335   LLVMContext &Context = CGM.getLLVMContext();
7336 
7337   if (SysReg.empty()) {
7338     const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
7339     SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
7340   }
7341 
7342   llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
7343   llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
7344   llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
7345 
7346   llvm::Type *Types[] = { RegisterType };
7347 
7348   bool MixedTypes = RegisterType->isIntegerTy(64) && ValueType->isIntegerTy(32);
7349   assert(!(RegisterType->isIntegerTy(32) && ValueType->isIntegerTy(64))
7350             && "Can't fit 64-bit value in 32-bit register");
7351 
7352   if (AccessKind != Write) {
7353     assert(AccessKind == NormalRead || AccessKind == VolatileRead);
7354     llvm::Function *F = CGM.getIntrinsic(
7355         AccessKind == VolatileRead ? llvm::Intrinsic::read_volatile_register
7356                                    : llvm::Intrinsic::read_register,
7357         Types);
7358     llvm::Value *Call = Builder.CreateCall(F, Metadata);
7359 
7360     if (MixedTypes)
      // Read into a 64-bit register, then truncate the result to 32 bits.
7362       return Builder.CreateTrunc(Call, ValueType);
7363 
7364     if (ValueType->isPointerTy())
7365       // Have i32/i64 result (Call) but want to return a VoidPtrTy (i8*).
7366       return Builder.CreateIntToPtr(Call, ValueType);
7367 
7368     return Call;
7369   }
7370 
7371   llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
7372   llvm::Value *ArgValue = CGF.EmitScalarExpr(E->getArg(1));
7373   if (MixedTypes) {
    // Extend the 32-bit write value to 64 bits to pass to the write intrinsic.
7375     ArgValue = Builder.CreateZExt(ArgValue, RegisterType);
7376     return Builder.CreateCall(F, { Metadata, ArgValue });
7377   }
7378 
7379   if (ValueType->isPointerTy()) {
7380     // Have VoidPtrTy ArgValue but want to return an i32/i64.
7381     ArgValue = Builder.CreatePtrToInt(ArgValue, RegisterType);
7382     return Builder.CreateCall(F, { Metadata, ArgValue });
7383   }
7384 
7385   return Builder.CreateCall(F, { Metadata, ArgValue });
7386 }
7387 
7388 /// Return true if BuiltinID is an overloaded Neon intrinsic with an extra
7389 /// argument that specifies the vector type.
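/// Most overloaded NEON builtins (e.g. vld1_v) carry a trailing constant that
/// encodes the NeonTypeFlags; the builtins listed below do not, so the caller
/// must not strip a type argument for them.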
7390 static bool HasExtraNeonArgument(unsigned BuiltinID) {
7391   switch (BuiltinID) {
7392   default: break;
7393   case NEON::BI__builtin_neon_vget_lane_i8:
7394   case NEON::BI__builtin_neon_vget_lane_i16:
7395   case NEON::BI__builtin_neon_vget_lane_bf16:
7396   case NEON::BI__builtin_neon_vget_lane_i32:
7397   case NEON::BI__builtin_neon_vget_lane_i64:
7398   case NEON::BI__builtin_neon_vget_lane_f32:
7399   case NEON::BI__builtin_neon_vgetq_lane_i8:
7400   case NEON::BI__builtin_neon_vgetq_lane_i16:
7401   case NEON::BI__builtin_neon_vgetq_lane_bf16:
7402   case NEON::BI__builtin_neon_vgetq_lane_i32:
7403   case NEON::BI__builtin_neon_vgetq_lane_i64:
7404   case NEON::BI__builtin_neon_vgetq_lane_f32:
7405   case NEON::BI__builtin_neon_vduph_lane_bf16:
7406   case NEON::BI__builtin_neon_vduph_laneq_bf16:
7407   case NEON::BI__builtin_neon_vset_lane_i8:
7408   case NEON::BI__builtin_neon_vset_lane_i16:
7409   case NEON::BI__builtin_neon_vset_lane_bf16:
7410   case NEON::BI__builtin_neon_vset_lane_i32:
7411   case NEON::BI__builtin_neon_vset_lane_i64:
7412   case NEON::BI__builtin_neon_vset_lane_f32:
7413   case NEON::BI__builtin_neon_vsetq_lane_i8:
7414   case NEON::BI__builtin_neon_vsetq_lane_i16:
7415   case NEON::BI__builtin_neon_vsetq_lane_bf16:
7416   case NEON::BI__builtin_neon_vsetq_lane_i32:
7417   case NEON::BI__builtin_neon_vsetq_lane_i64:
7418   case NEON::BI__builtin_neon_vsetq_lane_f32:
7419   case NEON::BI__builtin_neon_vsha1h_u32:
7420   case NEON::BI__builtin_neon_vsha1cq_u32:
7421   case NEON::BI__builtin_neon_vsha1pq_u32:
7422   case NEON::BI__builtin_neon_vsha1mq_u32:
7423   case NEON::BI__builtin_neon_vcvth_bf16_f32:
7424   case clang::ARM::BI_MoveToCoprocessor:
7425   case clang::ARM::BI_MoveToCoprocessor2:
7426     return false;
7427   }
7428   return true;
7429 }
7430 
7431 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
7432                                            const CallExpr *E,
7433                                            ReturnValueSlot ReturnValue,
7434                                            llvm::Triple::ArchType Arch) {
7435   if (auto Hint = GetValueForARMHint(BuiltinID))
7436     return Hint;
7437 
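  // __emit(imm) injects a raw instruction encoding via inline asm; e.g. in
  // Thumb mode, __emit(0xBF00) produces ".inst.n 0xBF00" (a Thumb NOP).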
7438   if (BuiltinID == ARM::BI__emit) {
7439     bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
7440     llvm::FunctionType *FTy =
7441         llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
7442 
7443     Expr::EvalResult Result;
7444     if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
7445       llvm_unreachable("Sema will ensure that the parameter is constant");
7446 
7447     llvm::APSInt Value = Result.Val.getInt();
7448     uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
7449 
7450     llvm::InlineAsm *Emit =
7451         IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
7452                                  /*hasSideEffects=*/true)
7453                 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
7454                                  /*hasSideEffects=*/true);
7455 
7456     return Builder.CreateCall(Emit);
7457   }
7458 
7459   if (BuiltinID == ARM::BI__builtin_arm_dbg) {
7460     Value *Option = EmitScalarExpr(E->getArg(0));
7461     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
7462   }
7463 
7464   if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
7465     Value *Address = EmitScalarExpr(E->getArg(0));
7466     Value *RW      = EmitScalarExpr(E->getArg(1));
7467     Value *IsData  = EmitScalarExpr(E->getArg(2));
7468 
    // Locality is not supported on the ARM target.
7470     Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
7471 
7472     Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
7473     return Builder.CreateCall(F, {Address, RW, Locality, IsData});
7474   }
7475 
7476   if (BuiltinID == ARM::BI__builtin_arm_rbit) {
7477     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7478     return Builder.CreateCall(
7479         CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
7480   }
7481 
7482   if (BuiltinID == ARM::BI__builtin_arm_cls) {
7483     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7484     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
7485   }
7486   if (BuiltinID == ARM::BI__builtin_arm_cls64) {
7487     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
7488     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
7489                               "cls");
7490   }
7491 
7492   if (BuiltinID == ARM::BI__clear_cache) {
7493     assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
7494     const FunctionDecl *FD = E->getDirectCallee();
7495     Value *Ops[2];
7496     for (unsigned i = 0; i < 2; i++)
7497       Ops[i] = EmitScalarExpr(E->getArg(i));
7498     llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
7499     llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
7500     StringRef Name = FD->getName();
7501     return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
7502   }
7503 
7504   if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
7505       BuiltinID == ARM::BI__builtin_arm_mcrr2) {
7506     Function *F;
7507 
7508     switch (BuiltinID) {
7509     default: llvm_unreachable("unexpected builtin");
7510     case ARM::BI__builtin_arm_mcrr:
7511       F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
7512       break;
7513     case ARM::BI__builtin_arm_mcrr2:
7514       F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
7515       break;
7516     }
7517 
    // The MCRR{2} instruction has 5 operands, but the intrinsic has only 4:
    // Rt and Rt2 are represented as a single unsigned 64-bit integer in the
    // intrinsic definition, while internally they are two 32-bit integers,
    // so the 64-bit value is split below.
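    //
    // For example, RtAndRt2 == 0xAABBCCDD11223344 is split into
    // Rt == 0x11223344 (the low half) and Rt2 == 0xAABBCCDD (the high half).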
7524 
7525     Value *Coproc = EmitScalarExpr(E->getArg(0));
7526     Value *Opc1 = EmitScalarExpr(E->getArg(1));
7527     Value *RtAndRt2 = EmitScalarExpr(E->getArg(2));
7528     Value *CRm = EmitScalarExpr(E->getArg(3));
7529 
7530     Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7531     Value *Rt = Builder.CreateTruncOrBitCast(RtAndRt2, Int32Ty);
7532     Value *Rt2 = Builder.CreateLShr(RtAndRt2, C1);
7533     Rt2 = Builder.CreateTruncOrBitCast(Rt2, Int32Ty);
7534 
7535     return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
7536   }
7537 
7538   if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
7539       BuiltinID == ARM::BI__builtin_arm_mrrc2) {
7540     Function *F;
7541 
7542     switch (BuiltinID) {
7543     default: llvm_unreachable("unexpected builtin");
7544     case ARM::BI__builtin_arm_mrrc:
7545       F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
7546       break;
7547     case ARM::BI__builtin_arm_mrrc2:
7548       F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
7549       break;
7550     }
7551 
7552     Value *Coproc = EmitScalarExpr(E->getArg(0));
7553     Value *Opc1 = EmitScalarExpr(E->getArg(1));
7554     Value *CRm  = EmitScalarExpr(E->getArg(2));
7555     Value *RtAndRt2 = Builder.CreateCall(F, {Coproc, Opc1, CRm});
7556 
    // The intrinsic returns an unsigned 64-bit integer, represented
    // as two 32-bit integers.
7559 
7560     Value *Rt = Builder.CreateExtractValue(RtAndRt2, 1);
7561     Value *Rt1 = Builder.CreateExtractValue(RtAndRt2, 0);
7562     Rt = Builder.CreateZExt(Rt, Int64Ty);
7563     Rt1 = Builder.CreateZExt(Rt1, Int64Ty);
7564 
7565     Value *ShiftCast = llvm::ConstantInt::get(Int64Ty, 32);
7566     RtAndRt2 = Builder.CreateShl(Rt, ShiftCast, "shl", true);
7567     RtAndRt2 = Builder.CreateOr(RtAndRt2, Rt1);
7568 
7569     return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
7570   }
7571 
7572   if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
7573       ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
7574         BuiltinID == ARM::BI__builtin_arm_ldaex) &&
7575        getContext().getTypeSize(E->getType()) == 64) ||
7576       BuiltinID == ARM::BI__ldrexd) {
7577     Function *F;
7578 
7579     switch (BuiltinID) {
7580     default: llvm_unreachable("unexpected builtin");
7581     case ARM::BI__builtin_arm_ldaex:
7582       F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
7583       break;
7584     case ARM::BI__builtin_arm_ldrexd:
7585     case ARM::BI__builtin_arm_ldrex:
7586     case ARM::BI__ldrexd:
7587       F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
7588       break;
7589     }
7590 
7591     Value *LdPtr = EmitScalarExpr(E->getArg(0));
7592     Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
7593                                     "ldrexd");
7594 
7595     Value *Val0 = Builder.CreateExtractValue(Val, 1);
7596     Value *Val1 = Builder.CreateExtractValue(Val, 0);
7597     Val0 = Builder.CreateZExt(Val0, Int64Ty);
7598     Val1 = Builder.CreateZExt(Val1, Int64Ty);
7599 
7600     Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
7601     Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
7602     Val = Builder.CreateOr(Val, Val1);
7603     return Builder.CreateBitCast(Val, ConvertType(E->getType()));
7604   }
7605 
7606   if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
7607       BuiltinID == ARM::BI__builtin_arm_ldaex) {
7608     Value *LoadAddr = EmitScalarExpr(E->getArg(0));
7609 
7610     QualType Ty = E->getType();
7611     llvm::Type *RealResTy = ConvertType(Ty);
7612     llvm::Type *PtrTy = llvm::IntegerType::get(
7613         getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
7614     LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
7615 
7616     Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
7617                                        ? Intrinsic::arm_ldaex
7618                                        : Intrinsic::arm_ldrex,
7619                                    PtrTy);
7620     Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
7621 
7622     if (RealResTy->isPointerTy())
7623       return Builder.CreateIntToPtr(Val, RealResTy);
7624     else {
7625       llvm::Type *IntResTy = llvm::IntegerType::get(
7626           getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
7627       Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
7628       return Builder.CreateBitCast(Val, RealResTy);
7629     }
7630   }
7631 
7632   if (BuiltinID == ARM::BI__builtin_arm_strexd ||
7633       ((BuiltinID == ARM::BI__builtin_arm_stlex ||
7634         BuiltinID == ARM::BI__builtin_arm_strex) &&
7635        getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
7636     Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7637                                        ? Intrinsic::arm_stlexd
7638                                        : Intrinsic::arm_strexd);
7639     llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
7640 
7641     Address Tmp = CreateMemTemp(E->getArg(0)->getType());
7642     Value *Val = EmitScalarExpr(E->getArg(0));
7643     Builder.CreateStore(Val, Tmp);
7644 
7645     Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
7646     Val = Builder.CreateLoad(LdPtr);
7647 
7648     Value *Arg0 = Builder.CreateExtractValue(Val, 0);
7649     Value *Arg1 = Builder.CreateExtractValue(Val, 1);
7650     Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
7651     return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
7652   }
7653 
7654   if (BuiltinID == ARM::BI__builtin_arm_strex ||
7655       BuiltinID == ARM::BI__builtin_arm_stlex) {
7656     Value *StoreVal = EmitScalarExpr(E->getArg(0));
7657     Value *StoreAddr = EmitScalarExpr(E->getArg(1));
7658 
7659     QualType Ty = E->getArg(0)->getType();
7660     llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
7661                                                  getContext().getTypeSize(Ty));
7662     StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
7663 
7664     if (StoreVal->getType()->isPointerTy())
7665       StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
7666     else {
7667       llvm::Type *IntTy = llvm::IntegerType::get(
7668           getLLVMContext(),
7669           CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
7670       StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
7671       StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
7672     }
7673 
7674     Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
7675                                        ? Intrinsic::arm_stlex
7676                                        : Intrinsic::arm_strex,
7677                                    StoreAddr->getType());
7678     return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
7679   }
7680 
7681   if (BuiltinID == ARM::BI__builtin_arm_clrex) {
7682     Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
7683     return Builder.CreateCall(F);
7684   }
7685 
7686   // CRC32
7687   Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
7688   switch (BuiltinID) {
7689   case ARM::BI__builtin_arm_crc32b:
7690     CRCIntrinsicID = Intrinsic::arm_crc32b; break;
7691   case ARM::BI__builtin_arm_crc32cb:
7692     CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
7693   case ARM::BI__builtin_arm_crc32h:
7694     CRCIntrinsicID = Intrinsic::arm_crc32h; break;
7695   case ARM::BI__builtin_arm_crc32ch:
7696     CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
7697   case ARM::BI__builtin_arm_crc32w:
7698   case ARM::BI__builtin_arm_crc32d:
7699     CRCIntrinsicID = Intrinsic::arm_crc32w; break;
7700   case ARM::BI__builtin_arm_crc32cw:
7701   case ARM::BI__builtin_arm_crc32cd:
7702     CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
7703   }
7704 
7705   if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
7706     Value *Arg0 = EmitScalarExpr(E->getArg(0));
7707     Value *Arg1 = EmitScalarExpr(E->getArg(1));
7708 
    // The crc32{c,}d intrinsics are implemented as two calls to the
    // crc32{c,}w intrinsics, hence we need different codegen for these cases.
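    // For example, __builtin_arm_crc32d(a, b) is emitted as
    // crc32w(crc32w(a, lo32(b)), hi32(b)).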
7711     if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
7712         BuiltinID == ARM::BI__builtin_arm_crc32cd) {
7713       Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
7714       Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
7715       Value *Arg1b = Builder.CreateLShr(Arg1, C1);
7716       Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
7717 
7718       Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7719       Value *Res = Builder.CreateCall(F, {Arg0, Arg1a});
7720       return Builder.CreateCall(F, {Res, Arg1b});
7721     } else {
7722       Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
7723 
7724       Function *F = CGM.getIntrinsic(CRCIntrinsicID);
7725       return Builder.CreateCall(F, {Arg0, Arg1});
7726     }
7727   }
7728 
7729   if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7730       BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7731       BuiltinID == ARM::BI__builtin_arm_rsrp ||
7732       BuiltinID == ARM::BI__builtin_arm_wsr ||
7733       BuiltinID == ARM::BI__builtin_arm_wsr64 ||
7734       BuiltinID == ARM::BI__builtin_arm_wsrp) {
7735 
7736     SpecialRegisterAccessKind AccessKind = Write;
7737     if (BuiltinID == ARM::BI__builtin_arm_rsr ||
7738         BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7739         BuiltinID == ARM::BI__builtin_arm_rsrp)
7740       AccessKind = VolatileRead;
7741 
7742     bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
7743                             BuiltinID == ARM::BI__builtin_arm_wsrp;
7744 
7745     bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
7746                    BuiltinID == ARM::BI__builtin_arm_wsr64;
7747 
7748     llvm::Type *ValueType;
7749     llvm::Type *RegisterType;
7750     if (IsPointerBuiltin) {
7751       ValueType = VoidPtrTy;
7752       RegisterType = Int32Ty;
7753     } else if (Is64Bit) {
7754       ValueType = RegisterType = Int64Ty;
7755     } else {
7756       ValueType = RegisterType = Int32Ty;
7757     }
7758 
7759     return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
7760                                       AccessKind);
7761   }
7762 
7763   // Handle MSVC intrinsics before argument evaluation to prevent double
7764   // evaluation.
7765   if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
7766     return EmitMSVCBuiltinExpr(*MsvcIntId, E);
7767 
7768   // Deal with MVE builtins
7769   if (Value *Result = EmitARMMVEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7770     return Result;
7771   // Handle CDE builtins
7772   if (Value *Result = EmitARMCDEBuiltinExpr(BuiltinID, E, ReturnValue, Arch))
7773     return Result;
7774 
7775   // Find out if any arguments are required to be integer constant
7776   // expressions.
7777   unsigned ICEArguments = 0;
7778   ASTContext::GetBuiltinTypeError Error;
7779   getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
7780   assert(Error == ASTContext::GE_None && "Should not codegen an error");
7781 
7782   auto getAlignmentValue32 = [&](Address addr) -> Value* {
7783     return Builder.getInt32(addr.getAlignment().getQuantity());
7784   };
7785 
7786   Address PtrOp0 = Address::invalid();
7787   Address PtrOp1 = Address::invalid();
7788   SmallVector<Value*, 4> Ops;
7789   bool HasExtraArg = HasExtraNeonArgument(BuiltinID);
7790   unsigned NumArgs = E->getNumArgs() - (HasExtraArg ? 1 : 0);
7791   for (unsigned i = 0, e = NumArgs; i != e; i++) {
7792     if (i == 0) {
7793       switch (BuiltinID) {
7794       case NEON::BI__builtin_neon_vld1_v:
7795       case NEON::BI__builtin_neon_vld1q_v:
7796       case NEON::BI__builtin_neon_vld1q_lane_v:
7797       case NEON::BI__builtin_neon_vld1_lane_v:
7798       case NEON::BI__builtin_neon_vld1_dup_v:
7799       case NEON::BI__builtin_neon_vld1q_dup_v:
7800       case NEON::BI__builtin_neon_vst1_v:
7801       case NEON::BI__builtin_neon_vst1q_v:
7802       case NEON::BI__builtin_neon_vst1q_lane_v:
7803       case NEON::BI__builtin_neon_vst1_lane_v:
7804       case NEON::BI__builtin_neon_vst2_v:
7805       case NEON::BI__builtin_neon_vst2q_v:
7806       case NEON::BI__builtin_neon_vst2_lane_v:
7807       case NEON::BI__builtin_neon_vst2q_lane_v:
7808       case NEON::BI__builtin_neon_vst3_v:
7809       case NEON::BI__builtin_neon_vst3q_v:
7810       case NEON::BI__builtin_neon_vst3_lane_v:
7811       case NEON::BI__builtin_neon_vst3q_lane_v:
7812       case NEON::BI__builtin_neon_vst4_v:
7813       case NEON::BI__builtin_neon_vst4q_v:
7814       case NEON::BI__builtin_neon_vst4_lane_v:
7815       case NEON::BI__builtin_neon_vst4q_lane_v:
7816         // Get the alignment for the argument in addition to the value;
7817         // we'll use it later.
7818         PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
7819         Ops.push_back(PtrOp0.getPointer());
7820         continue;
7821       }
7822     }
7823     if (i == 1) {
7824       switch (BuiltinID) {
7825       case NEON::BI__builtin_neon_vld2_v:
7826       case NEON::BI__builtin_neon_vld2q_v:
7827       case NEON::BI__builtin_neon_vld3_v:
7828       case NEON::BI__builtin_neon_vld3q_v:
7829       case NEON::BI__builtin_neon_vld4_v:
7830       case NEON::BI__builtin_neon_vld4q_v:
7831       case NEON::BI__builtin_neon_vld2_lane_v:
7832       case NEON::BI__builtin_neon_vld2q_lane_v:
7833       case NEON::BI__builtin_neon_vld3_lane_v:
7834       case NEON::BI__builtin_neon_vld3q_lane_v:
7835       case NEON::BI__builtin_neon_vld4_lane_v:
7836       case NEON::BI__builtin_neon_vld4q_lane_v:
7837       case NEON::BI__builtin_neon_vld2_dup_v:
7838       case NEON::BI__builtin_neon_vld2q_dup_v:
7839       case NEON::BI__builtin_neon_vld3_dup_v:
7840       case NEON::BI__builtin_neon_vld3q_dup_v:
7841       case NEON::BI__builtin_neon_vld4_dup_v:
7842       case NEON::BI__builtin_neon_vld4q_dup_v:
7843         // Get the alignment for the argument in addition to the value;
7844         // we'll use it later.
7845         PtrOp1 = EmitPointerWithAlignment(E->getArg(1));
7846         Ops.push_back(PtrOp1.getPointer());
7847         continue;
7848       }
7849     }
7850 
7851     if ((ICEArguments & (1 << i)) == 0) {
7852       Ops.push_back(EmitScalarExpr(E->getArg(i)));
7853     } else {
7854       // If this is required to be a constant, constant fold it so that we know
7855       // that the generated intrinsic gets a ConstantInt.
7856       Ops.push_back(llvm::ConstantInt::get(
7857           getLLVMContext(),
7858           *E->getArg(i)->getIntegerConstantExpr(getContext())));
7859     }
7860   }
7861 
7862   switch (BuiltinID) {
7863   default: break;
7864 
7865   case NEON::BI__builtin_neon_vget_lane_i8:
7866   case NEON::BI__builtin_neon_vget_lane_i16:
7867   case NEON::BI__builtin_neon_vget_lane_i32:
7868   case NEON::BI__builtin_neon_vget_lane_i64:
7869   case NEON::BI__builtin_neon_vget_lane_bf16:
7870   case NEON::BI__builtin_neon_vget_lane_f32:
7871   case NEON::BI__builtin_neon_vgetq_lane_i8:
7872   case NEON::BI__builtin_neon_vgetq_lane_i16:
7873   case NEON::BI__builtin_neon_vgetq_lane_i32:
7874   case NEON::BI__builtin_neon_vgetq_lane_i64:
7875   case NEON::BI__builtin_neon_vgetq_lane_bf16:
7876   case NEON::BI__builtin_neon_vgetq_lane_f32:
7877   case NEON::BI__builtin_neon_vduph_lane_bf16:
7878   case NEON::BI__builtin_neon_vduph_laneq_bf16:
7879     return Builder.CreateExtractElement(Ops[0], Ops[1], "vget_lane");
7880 
7881   case NEON::BI__builtin_neon_vrndns_f32: {
7882     Value *Arg = EmitScalarExpr(E->getArg(0));
7883     llvm::Type *Tys[] = {Arg->getType()};
7884     Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vrintn, Tys);
    return Builder.CreateCall(F, {Arg}, "vrndn");
  }
7886 
7887   case NEON::BI__builtin_neon_vset_lane_i8:
7888   case NEON::BI__builtin_neon_vset_lane_i16:
7889   case NEON::BI__builtin_neon_vset_lane_i32:
7890   case NEON::BI__builtin_neon_vset_lane_i64:
7891   case NEON::BI__builtin_neon_vset_lane_bf16:
7892   case NEON::BI__builtin_neon_vset_lane_f32:
7893   case NEON::BI__builtin_neon_vsetq_lane_i8:
7894   case NEON::BI__builtin_neon_vsetq_lane_i16:
7895   case NEON::BI__builtin_neon_vsetq_lane_i32:
7896   case NEON::BI__builtin_neon_vsetq_lane_i64:
7897   case NEON::BI__builtin_neon_vsetq_lane_bf16:
7898   case NEON::BI__builtin_neon_vsetq_lane_f32:
7899     return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
7900 
7901   case NEON::BI__builtin_neon_vsha1h_u32:
7902     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
7903                         "vsha1h");
  case NEON::BI__builtin_neon_vsha1cq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
                        "vsha1c");
  case NEON::BI__builtin_neon_vsha1pq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
                        "vsha1p");
  case NEON::BI__builtin_neon_vsha1mq_u32:
    return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
                        "vsha1m");
7913 
7914   case NEON::BI__builtin_neon_vcvth_bf16_f32: {
7915     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vcvtbfp2bf), Ops,
7916                         "vcvtbfp2bf");
7917   }
7918 
7919   // The ARM _MoveToCoprocessor builtins put the input register value as
7920   // the first argument, but the LLVM intrinsic expects it as the third one.
7921   case ARM::BI_MoveToCoprocessor:
7922   case ARM::BI_MoveToCoprocessor2: {
7923     Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
7924                                    Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
7925     return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
7926                                   Ops[3], Ops[4], Ops[5]});
7927   }
7928   }
7929 
7930   // Get the last argument, which specifies the vector type.
7931   assert(HasExtraArg);
7932   const Expr *Arg = E->getArg(E->getNumArgs()-1);
7933   Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext());
7934   if (!Result)
7935     return nullptr;
7936 
7937   if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
7938       BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
7939     // Determine the overloaded type of this builtin.
7940     llvm::Type *Ty;
7941     if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
7942       Ty = FloatTy;
7943     else
7944       Ty = DoubleTy;
7945 
7946     // Determine whether this is an unsigned conversion or not.
7947     bool usgn = Result->getZExtValue() == 1;
7948     unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
7949 
7950     // Call the appropriate intrinsic.
7951     Function *F = CGM.getIntrinsic(Int, Ty);
7952     return Builder.CreateCall(F, Ops, "vcvtr");
7953   }
7954 
7955   // Determine the type of this overloaded NEON intrinsic.
7956   NeonTypeFlags Type = Result->getZExtValue();
7957   bool usgn = Type.isUnsigned();
7958   bool rightShift = false;
7959 
7960   llvm::FixedVectorType *VTy =
7961       GetNeonType(this, Type, getTarget().hasLegalHalfType(), false,
7962                   getTarget().hasBFloat16Type());
7963   llvm::Type *Ty = VTy;
7964   if (!Ty)
7965     return nullptr;
7966 
7967   // Many NEON builtins have identical semantics and uses in ARM and
7968   // AArch64. Emit these in a single function.
7969   auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
7970   const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
7971       IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
7972   if (Builtin)
7973     return EmitCommonNeonBuiltinExpr(
7974         Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
7975         Builtin->NameHint, Builtin->TypeModifier, E, Ops, PtrOp0, PtrOp1, Arch);
7976 
7977   unsigned Int;
7978   switch (BuiltinID) {
7979   default: return nullptr;
7980   case NEON::BI__builtin_neon_vld1q_lane_v:
7981     // Handle 64-bit integer elements as a special case.  Use shuffles of
7982     // one-element vectors to avoid poor code for i64 in the backend.
7983     if (VTy->getElementType()->isIntegerTy(64)) {
7984       // Extract the other lane.
7985       Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
7986       int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
7987       Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
7988       Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
7989       // Load the value as a one-element vector.
7990       Ty = llvm::FixedVectorType::get(VTy->getElementType(), 1);
7991       llvm::Type *Tys[] = {Ty, Int8PtrTy};
7992       Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Tys);
7993       Value *Align = getAlignmentValue32(PtrOp0);
7994       Value *Ld = Builder.CreateCall(F, {Ops[0], Align});
7995       // Combine them.
7996       int Indices[] = {1 - Lane, Lane};
7997       return Builder.CreateShuffleVector(Ops[1], Ld, Indices, "vld1q_lane");
7998     }
7999     LLVM_FALLTHROUGH;
8000   case NEON::BI__builtin_neon_vld1_lane_v: {
8001     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8002     PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
8003     Value *Ld = Builder.CreateLoad(PtrOp0);
8004     return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
8005   }
8006   case NEON::BI__builtin_neon_vqrshrn_n_v:
8007     Int =
8008       usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
8009     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
8010                         1, true);
8011   case NEON::BI__builtin_neon_vqrshrun_n_v:
8012     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
8013                         Ops, "vqrshrun_n", 1, true);
8014   case NEON::BI__builtin_neon_vqshrn_n_v:
8015     Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
8016     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
8017                         1, true);
8018   case NEON::BI__builtin_neon_vqshrun_n_v:
8019     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
8020                         Ops, "vqshrun_n", 1, true);
8021   case NEON::BI__builtin_neon_vrecpe_v:
8022   case NEON::BI__builtin_neon_vrecpeq_v:
8023     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
8024                         Ops, "vrecpe");
8025   case NEON::BI__builtin_neon_vrshrn_n_v:
8026     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
8027                         Ops, "vrshrn_n", 1, true);
8028   case NEON::BI__builtin_neon_vrsra_n_v:
8029   case NEON::BI__builtin_neon_vrsraq_n_v:
8030     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8031     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8032     Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
8033     Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
8034     Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Ty), {Ops[1], Ops[2]});
8035     return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
8036   case NEON::BI__builtin_neon_vsri_n_v:
8037   case NEON::BI__builtin_neon_vsriq_n_v:
8038     rightShift = true;
8039     LLVM_FALLTHROUGH;
8040   case NEON::BI__builtin_neon_vsli_n_v:
8041   case NEON::BI__builtin_neon_vsliq_n_v:
8042     Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
8043     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
8044                         Ops, "vsli_n");
8045   case NEON::BI__builtin_neon_vsra_n_v:
8046   case NEON::BI__builtin_neon_vsraq_n_v:
8047     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
8048     Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
8049     return Builder.CreateAdd(Ops[0], Ops[1]);
8050   case NEON::BI__builtin_neon_vst1q_lane_v:
8051     // Handle 64-bit integer elements as a special case.  Use a shuffle to get
8052     // a one-element vector and avoid poor code for i64 in the backend.
8053     if (VTy->getElementType()->isIntegerTy(64)) {
8054       Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8055       Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
8056       Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
8057       Ops[2] = getAlignmentValue32(PtrOp0);
8058       llvm::Type *Tys[] = {Int8PtrTy, Ops[1]->getType()};
8059       return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
8060                                                  Tys), Ops);
8061     }
8062     LLVM_FALLTHROUGH;
8063   case NEON::BI__builtin_neon_vst1_lane_v: {
8064     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
8065     Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
8066     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
8067     auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
8068     return St;
8069   }
8070   case NEON::BI__builtin_neon_vtbl1_v:
8071     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
8072                         Ops, "vtbl1");
8073   case NEON::BI__builtin_neon_vtbl2_v:
8074     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
8075                         Ops, "vtbl2");
8076   case NEON::BI__builtin_neon_vtbl3_v:
8077     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
8078                         Ops, "vtbl3");
8079   case NEON::BI__builtin_neon_vtbl4_v:
8080     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
8081                         Ops, "vtbl4");
8082   case NEON::BI__builtin_neon_vtbx1_v:
8083     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
8084                         Ops, "vtbx1");
8085   case NEON::BI__builtin_neon_vtbx2_v:
8086     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
8087                         Ops, "vtbx2");
8088   case NEON::BI__builtin_neon_vtbx3_v:
8089     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
8090                         Ops, "vtbx3");
8091   case NEON::BI__builtin_neon_vtbx4_v:
8092     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
8093                         Ops, "vtbx4");
8094   }
8095 }
8096 
8097 template<typename Integer>
8098 static Integer GetIntegerConstantValue(const Expr *E, ASTContext &Context) {
8099   return E->getIntegerConstantExpr(Context)->getExtValue();
8100 }
8101 
8102 static llvm::Value *SignOrZeroExtend(CGBuilderTy &Builder, llvm::Value *V,
8103                                      llvm::Type *T, bool Unsigned) {
8104   // Helper function called by Tablegen-constructed ARM MVE builtin codegen,
8105   // which finds it convenient to specify signed/unsigned as a boolean flag.
8106   return Unsigned ? Builder.CreateZExt(V, T) : Builder.CreateSExt(V, T);
8107 }
8108 
8109 static llvm::Value *MVEImmediateShr(CGBuilderTy &Builder, llvm::Value *V,
8110                                     uint32_t Shift, bool Unsigned) {
8111   // MVE helper function for integer shift right. This must handle signed vs
8112   // unsigned, and also deal specially with the case where the shift count is
8113   // equal to the lane size. In LLVM IR, an LShr with that parameter would be
8114   // undefined behavior, but in MVE it's legal, so we must convert it to code
8115   // that is not undefined in IR.
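  // For example, an unsigned shift of a <4 x i32> vector by 32 simply yields
  // zeroinitializer, while a signed shift by 32 is emitted as an ashr by 31.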
8116   unsigned LaneBits = cast<llvm::VectorType>(V->getType())
8117                           ->getElementType()
8118                           ->getPrimitiveSizeInBits();
8119   if (Shift == LaneBits) {
8120     // An unsigned shift of the full lane size always generates zero, so we can
8121     // simply emit a zero vector. A signed shift of the full lane size does the
8122     // same thing as shifting by one bit fewer.
8123     if (Unsigned)
8124       return llvm::Constant::getNullValue(V->getType());
8125     else
8126       --Shift;
8127   }
8128   return Unsigned ? Builder.CreateLShr(V, Shift) : Builder.CreateAShr(V, Shift);
8129 }
8130 
8131 static llvm::Value *ARMMVEVectorSplat(CGBuilderTy &Builder, llvm::Value *V) {
8132   // MVE-specific helper function for a vector splat, which infers the element
8133   // count of the output vector by knowing that MVE vectors are all 128 bits
8134   // wide.
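  // For example, an i16 input yields 128 / 16 == 8 lanes, i.e. a <8 x i16>
  // splat.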
8135   unsigned Elements = 128 / V->getType()->getPrimitiveSizeInBits();
8136   return Builder.CreateVectorSplat(Elements, V);
8137 }
8138 
8139 static llvm::Value *ARMMVEVectorReinterpret(CGBuilderTy &Builder,
8140                                             CodeGenFunction *CGF,
8141                                             llvm::Value *V,
8142                                             llvm::Type *DestType) {
8143   // Convert one MVE vector type into another by reinterpreting its in-register
8144   // format.
8145   //
  // On little-endian targets, this is identical to a bitcast (which
  // reinterprets the memory format). On big-endian targets the two are not
  // necessarily the same, because the register and memory formats map to each
  // other differently depending on the lane size.
8150   //
8151   // We generate a bitcast whenever we can (if we're little-endian, or if the
8152   // lane sizes are the same anyway). Otherwise we fall back to an IR intrinsic
8153   // that performs the different kind of reinterpretation.
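  // For example, reinterpreting <8 x i16> as <4 x i32> on a big-endian target
  // goes through @llvm.arm.mve.vreinterpretq, while on a little-endian target
  // it is a plain bitcast.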
8154   if (CGF->getTarget().isBigEndian() &&
8155       V->getType()->getScalarSizeInBits() != DestType->getScalarSizeInBits()) {
8156     return Builder.CreateCall(
8157         CGF->CGM.getIntrinsic(Intrinsic::arm_mve_vreinterpretq,
8158                               {DestType, V->getType()}),
8159         V);
8160   } else {
8161     return Builder.CreateBitCast(V, DestType);
8162   }
8163 }
8164 
8165 static llvm::Value *VectorUnzip(CGBuilderTy &Builder, llvm::Value *V, bool Odd) {
8166   // Make a shufflevector that extracts every other element of a vector (evens
8167   // or odds, as desired).
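  // For example, a <8 x i16> input with Odd == false selects lanes
  // {0, 2, 4, 6}, producing a <4 x i16> result.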
8168   SmallVector<int, 16> Indices;
8169   unsigned InputElements =
8170       cast<llvm::FixedVectorType>(V->getType())->getNumElements();
8171   for (unsigned i = 0; i < InputElements; i += 2)
8172     Indices.push_back(i + Odd);
8173   return Builder.CreateShuffleVector(V, Indices);
8174 }
8175 
8176 static llvm::Value *VectorZip(CGBuilderTy &Builder, llvm::Value *V0,
8177                               llvm::Value *V1) {
8178   // Make a shufflevector that interleaves two vectors element by element.
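  // For example, zipping two <4 x i32> inputs uses the shuffle mask
  // {0, 4, 1, 5, 2, 6, 3, 7} and yields a <8 x i32> result.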
8179   assert(V0->getType() == V1->getType() && "Can't zip different vector types");
8180   SmallVector<int, 16> Indices;
8181   unsigned InputElements =
8182       cast<llvm::FixedVectorType>(V0->getType())->getNumElements();
8183   for (unsigned i = 0; i < InputElements; i++) {
8184     Indices.push_back(i);
8185     Indices.push_back(i + InputElements);
8186   }
8187   return Builder.CreateShuffleVector(V0, V1, Indices);
8188 }
8189 
8190 template<unsigned HighBit, unsigned OtherBits>
8191 static llvm::Value *ARMMVEConstantSplat(CGBuilderTy &Builder, llvm::Type *VT) {
8192   // MVE-specific helper function to make a vector splat of a constant such as
8193   // UINT_MAX or INT_MIN, in which all bits below the highest one are equal.
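  // For example, with 32-bit lanes, <HighBit=1, OtherBits=0> splats
  // 0x80000000 (INT_MIN) and <HighBit=0, OtherBits=1> splats 0x7FFFFFFF
  // (INT_MAX).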
8194   llvm::Type *T = cast<llvm::VectorType>(VT)->getElementType();
8195   unsigned LaneBits = T->getPrimitiveSizeInBits();
8196   uint32_t Value = HighBit << (LaneBits - 1);
8197   if (OtherBits)
8198     Value |= (1UL << (LaneBits - 1)) - 1;
8199   llvm::Value *Lane = llvm::ConstantInt::get(T, Value);
8200   return ARMMVEVectorSplat(Builder, Lane);
8201 }
8202 
8203 static llvm::Value *ARMMVEVectorElementReverse(CGBuilderTy &Builder,
8204                                                llvm::Value *V,
8205                                                unsigned ReverseWidth) {
8206   // MVE-specific helper function which reverses the elements of a
8207   // vector within every (ReverseWidth)-bit collection of lanes.
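  // For example, ReverseWidth == 32 on a <16 x i8> vector gives Mask == 3, so
  // the shuffle mask {3, 2, 1, 0, 7, 6, 5, 4, ...} reverses the bytes within
  // every 32-bit group.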
8208   SmallVector<int, 16> Indices;
8209   unsigned LaneSize = V->getType()->getScalarSizeInBits();
8210   unsigned Elements = 128 / LaneSize;
8211   unsigned Mask = ReverseWidth / LaneSize - 1;
8212   for (unsigned i = 0; i < Elements; i++)
8213     Indices.push_back(i ^ Mask);
8214   return Builder.CreateShuffleVector(V, Indices);
8215 }
8216 
8217 Value *CodeGenFunction::EmitARMMVEBuiltinExpr(unsigned BuiltinID,
8218                                               const CallExpr *E,
8219                                               ReturnValueSlot ReturnValue,
8220                                               llvm::Triple::ArchType Arch) {
8221   enum class CustomCodeGen { VLD24, VST24 } CustomCodeGenType;
8222   Intrinsic::ID IRIntr;
8223   unsigned NumVectors;
8224 
8225   // Code autogenerated by Tablegen will handle all the simple builtins.
8226   switch (BuiltinID) {
8227     #include "clang/Basic/arm_mve_builtin_cg.inc"
8228 
8229     // If we didn't match an MVE builtin id at all, go back to the
8230     // main EmitARMBuiltinExpr.
8231   default:
8232     return nullptr;
8233   }
8234 
8235   // Anything that breaks from that switch is an MVE builtin that
8236   // needs handwritten code to generate.
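  // For example, the vld2q/vld4q family selects CustomCodeGen::VLD24 and the
  // vst2q/vst4q family selects CustomCodeGen::VST24 below.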
8237 
8238   switch (CustomCodeGenType) {
8239 
8240   case CustomCodeGen::VLD24: {
8241     llvm::SmallVector<Value *, 4> Ops;
8242     llvm::SmallVector<llvm::Type *, 4> Tys;
8243 
8244     auto MvecCType = E->getType();
8245     auto MvecLType = ConvertType(MvecCType);
8246     assert(MvecLType->isStructTy() &&
8247            "Return type for vld[24]q should be a struct");
8248     assert(MvecLType->getStructNumElements() == 1 &&
8249            "Return-type struct for vld[24]q should have one element");
8250     auto MvecLTypeInner = MvecLType->getStructElementType(0);
8251     assert(MvecLTypeInner->isArrayTy() &&
8252            "Return-type struct for vld[24]q should contain an array");
8253     assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
8254            "Array member of return-type struct vld[24]q has wrong length");
8255     auto VecLType = MvecLTypeInner->getArrayElementType();
8256 
8257     Tys.push_back(VecLType);
8258 
8259     auto Addr = E->getArg(0);
8260     Ops.push_back(EmitScalarExpr(Addr));
8261     Tys.push_back(ConvertType(Addr->getType()));
8262 
8263     Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
8264     Value *LoadResult = Builder.CreateCall(F, Ops);
8265     Value *MvecOut = UndefValue::get(MvecLType);
8266     for (unsigned i = 0; i < NumVectors; ++i) {
8267       Value *Vec = Builder.CreateExtractValue(LoadResult, i);
8268       MvecOut = Builder.CreateInsertValue(MvecOut, Vec, {0, i});
8269     }
8270 
8271     if (ReturnValue.isNull())
8272       return MvecOut;
8273     else
8274       return Builder.CreateStore(MvecOut, ReturnValue.getValue());
8275   }
8276 
8277   case CustomCodeGen::VST24: {
8278     llvm::SmallVector<Value *, 4> Ops;
8279     llvm::SmallVector<llvm::Type *, 4> Tys;
8280 
8281     auto Addr = E->getArg(0);
8282     Ops.push_back(EmitScalarExpr(Addr));
8283     Tys.push_back(ConvertType(Addr->getType()));
8284 
8285     auto MvecCType = E->getArg(1)->getType();
8286     auto MvecLType = ConvertType(MvecCType);
    assert(MvecLType->isStructTy() &&
           "Data type for vst[24]q should be a struct");
    assert(MvecLType->getStructNumElements() == 1 &&
           "Data-type struct for vst[24]q should have one element");
    auto MvecLTypeInner = MvecLType->getStructElementType(0);
    assert(MvecLTypeInner->isArrayTy() &&
           "Data-type struct for vst[24]q should contain an array");
    assert(MvecLTypeInner->getArrayNumElements() == NumVectors &&
           "Array member of data-type struct for vst[24]q has wrong length");
8295     auto VecLType = MvecLTypeInner->getArrayElementType();
8296 
8297     Tys.push_back(VecLType);
8298 
8299     AggValueSlot MvecSlot = CreateAggTemp(MvecCType);
8300     EmitAggExpr(E->getArg(1), MvecSlot);
8301     auto Mvec = Builder.CreateLoad(MvecSlot.getAddress());
8302     for (unsigned i = 0; i < NumVectors; i++)
8303       Ops.push_back(Builder.CreateExtractValue(Mvec, {0, i}));
8304 
8305     Function *F = CGM.getIntrinsic(IRIntr, makeArrayRef(Tys));
8306     Value *ToReturn = nullptr;
8307     for (unsigned i = 0; i < NumVectors; i++) {
8308       Ops.push_back(llvm::ConstantInt::get(Int32Ty, i));
8309       ToReturn = Builder.CreateCall(F, Ops);
8310       Ops.pop_back();
8311     }
8312     return ToReturn;
8313   }
8314   }
8315   llvm_unreachable("unknown custom codegen type.");
8316 }
8317 
8318 Value *CodeGenFunction::EmitARMCDEBuiltinExpr(unsigned BuiltinID,
8319                                               const CallExpr *E,
8320                                               ReturnValueSlot ReturnValue,
8321                                               llvm::Triple::ArchType Arch) {
8322   switch (BuiltinID) {
8323   default:
8324     return nullptr;
8325 #include "clang/Basic/arm_cde_builtin_cg.inc"
8326   }
8327 }
8328 
8329 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
8330                                       const CallExpr *E,
8331                                       SmallVectorImpl<Value *> &Ops,
8332                                       llvm::Triple::ArchType Arch) {
8333   unsigned int Int = 0;
8334   const char *s = nullptr;
8335 
8336   switch (BuiltinID) {
8337   default:
8338     return nullptr;
8339   case NEON::BI__builtin_neon_vtbl1_v:
8340   case NEON::BI__builtin_neon_vqtbl1_v:
8341   case NEON::BI__builtin_neon_vqtbl1q_v:
8342   case NEON::BI__builtin_neon_vtbl2_v:
8343   case NEON::BI__builtin_neon_vqtbl2_v:
8344   case NEON::BI__builtin_neon_vqtbl2q_v:
8345   case NEON::BI__builtin_neon_vtbl3_v:
8346   case NEON::BI__builtin_neon_vqtbl3_v:
8347   case NEON::BI__builtin_neon_vqtbl3q_v:
8348   case NEON::BI__builtin_neon_vtbl4_v:
8349   case NEON::BI__builtin_neon_vqtbl4_v:
8350   case NEON::BI__builtin_neon_vqtbl4q_v:
8351     break;
8352   case NEON::BI__builtin_neon_vtbx1_v:
8353   case NEON::BI__builtin_neon_vqtbx1_v:
8354   case NEON::BI__builtin_neon_vqtbx1q_v:
8355   case NEON::BI__builtin_neon_vtbx2_v:
8356   case NEON::BI__builtin_neon_vqtbx2_v:
8357   case NEON::BI__builtin_neon_vqtbx2q_v:
8358   case NEON::BI__builtin_neon_vtbx3_v:
8359   case NEON::BI__builtin_neon_vqtbx3_v:
8360   case NEON::BI__builtin_neon_vqtbx3q_v:
8361   case NEON::BI__builtin_neon_vtbx4_v:
8362   case NEON::BI__builtin_neon_vqtbx4_v:
8363   case NEON::BI__builtin_neon_vqtbx4q_v:
8364     break;
8365   }
8366 
8367   assert(E->getNumArgs() >= 3);
8368 
8369   // Get the last argument, which specifies the vector type.
8370   const Expr *Arg = E->getArg(E->getNumArgs() - 1);
8371   Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(CGF.getContext());
8372   if (!Result)
8373     return nullptr;
8374 
8375   // Determine the type of this overloaded NEON intrinsic.
8376   NeonTypeFlags Type = Result->getZExtValue();
8377   llvm::FixedVectorType *Ty = GetNeonType(&CGF, Type);
8378   if (!Ty)
8379     return nullptr;
8380 
8381   CodeGen::CGBuilderTy &Builder = CGF.Builder;
8382 
  // AArch64 scalar builtins are not overloaded: they do not have an extra
  // argument that specifies the vector type, so each case must be handled
  // individually.
8385   switch (BuiltinID) {
8386   case NEON::BI__builtin_neon_vtbl1_v: {
8387     return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 1), nullptr,
8388                               Ops[1], Ty, Intrinsic::aarch64_neon_tbl1,
8389                               "vtbl1");
8390   }
8391   case NEON::BI__builtin_neon_vtbl2_v: {
8392     return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 2), nullptr,
8393                               Ops[2], Ty, Intrinsic::aarch64_neon_tbl1,
8394                               "vtbl1");
8395   }
8396   case NEON::BI__builtin_neon_vtbl3_v: {
8397     return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 3), nullptr,
8398                               Ops[3], Ty, Intrinsic::aarch64_neon_tbl2,
8399                               "vtbl2");
8400   }
8401   case NEON::BI__builtin_neon_vtbl4_v: {
8402     return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(0, 4), nullptr,
8403                               Ops[4], Ty, Intrinsic::aarch64_neon_tbl2,
8404                               "vtbl2");
8405   }
8406   case NEON::BI__builtin_neon_vtbx1_v: {
8407     Value *TblRes =
8408         packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 1), nullptr, Ops[2],
8409                            Ty, Intrinsic::aarch64_neon_tbl1, "vtbl1");
8410 
8411     llvm::Constant *EightV = ConstantInt::get(Ty, 8);
8412     Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
8413     CmpRes = Builder.CreateSExt(CmpRes, Ty);
8414 
8415     Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
8416     Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
8417     return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
8418   }
8419   case NEON::BI__builtin_neon_vtbx2_v: {
8420     return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 2), Ops[0],
8421                               Ops[3], Ty, Intrinsic::aarch64_neon_tbx1,
8422                               "vtbx1");
8423   }
8424   case NEON::BI__builtin_neon_vtbx3_v: {
8425     Value *TblRes =
8426         packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 3), nullptr, Ops[4],
8427                            Ty, Intrinsic::aarch64_neon_tbl2, "vtbl2");
8428 
8429     llvm::Constant *TwentyFourV = ConstantInt::get(Ty, 24);
    Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4], TwentyFourV);
8432     CmpRes = Builder.CreateSExt(CmpRes, Ty);
8433 
8434     Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
8435     Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
8436     return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
8437   }
8438   case NEON::BI__builtin_neon_vtbx4_v: {
8439     return packTBLDVectorList(CGF, makeArrayRef(Ops).slice(1, 4), Ops[0],
8440                               Ops[5], Ty, Intrinsic::aarch64_neon_tbx2,
8441                               "vtbx2");
8442   }
8443   case NEON::BI__builtin_neon_vqtbl1_v:
8444   case NEON::BI__builtin_neon_vqtbl1q_v:
8445     Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
8446   case NEON::BI__builtin_neon_vqtbl2_v:
  case NEON::BI__builtin_neon_vqtbl2q_v:
    Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
8449   case NEON::BI__builtin_neon_vqtbl3_v:
8450   case NEON::BI__builtin_neon_vqtbl3q_v:
8451     Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
8452   case NEON::BI__builtin_neon_vqtbl4_v:
8453   case NEON::BI__builtin_neon_vqtbl4q_v:
8454     Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
8455   case NEON::BI__builtin_neon_vqtbx1_v:
8456   case NEON::BI__builtin_neon_vqtbx1q_v:
8457     Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
8458   case NEON::BI__builtin_neon_vqtbx2_v:
8459   case NEON::BI__builtin_neon_vqtbx2q_v:
8460     Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
8461   case NEON::BI__builtin_neon_vqtbx3_v:
8462   case NEON::BI__builtin_neon_vqtbx3q_v:
8463     Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
8464   case NEON::BI__builtin_neon_vqtbx4_v:
8465   case NEON::BI__builtin_neon_vqtbx4q_v:
8466     Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
8467   }
8468   }
8469 
8470   if (!Int)
8471     return nullptr;
8472 
8473   Function *F = CGF.CGM.getIntrinsic(Int, Ty);
8474   return CGF.EmitNeonCall(F, Ops, s);
8475 }
8476 
8477 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
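  // Place the scalar in lane 0 of a <4 x i16> vector, leaving the remaining
  // lanes undef, so that it can be passed to a vector intrinsic.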
8478   auto *VTy = llvm::FixedVectorType::get(Int16Ty, 4);
8479   Op = Builder.CreateBitCast(Op, Int16Ty);
8480   Value *V = UndefValue::get(VTy);
8481   llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
8482   Op = Builder.CreateInsertElement(V, Op, CI);
8483   return Op;
8484 }
8485 
8486 /// SVEBuiltinMemEltTy - Returns the memory element type for this memory
8487 /// access builtin.  Only required if it can't be inferred from the base pointer
8488 /// operand.
8489 llvm::Type *CodeGenFunction::SVEBuiltinMemEltTy(const SVETypeFlags &TypeFlags) {
8490   switch (TypeFlags.getMemEltType()) {
8491   case SVETypeFlags::MemEltTyDefault:
8492     return getEltType(TypeFlags);
8493   case SVETypeFlags::MemEltTyInt8:
8494     return Builder.getInt8Ty();
8495   case SVETypeFlags::MemEltTyInt16:
8496     return Builder.getInt16Ty();
8497   case SVETypeFlags::MemEltTyInt32:
8498     return Builder.getInt32Ty();
8499   case SVETypeFlags::MemEltTyInt64:
8500     return Builder.getInt64Ty();
8501   }
8502   llvm_unreachable("Unknown MemEltType");
8503 }
8504 
8505 llvm::Type *CodeGenFunction::getEltType(const SVETypeFlags &TypeFlags) {
8506   switch (TypeFlags.getEltType()) {
8507   default:
8508     llvm_unreachable("Invalid SVETypeFlag!");
8509 
8510   case SVETypeFlags::EltTyInt8:
8511     return Builder.getInt8Ty();
8512   case SVETypeFlags::EltTyInt16:
8513     return Builder.getInt16Ty();
8514   case SVETypeFlags::EltTyInt32:
8515     return Builder.getInt32Ty();
8516   case SVETypeFlags::EltTyInt64:
8517     return Builder.getInt64Ty();
8518 
8519   case SVETypeFlags::EltTyFloat16:
8520     return Builder.getHalfTy();
8521   case SVETypeFlags::EltTyFloat32:
8522     return Builder.getFloatTy();
8523   case SVETypeFlags::EltTyFloat64:
8524     return Builder.getDoubleTy();
8525 
8526   case SVETypeFlags::EltTyBFloat16:
8527     return Builder.getBFloatTy();
8528 
8529   case SVETypeFlags::EltTyBool8:
8530   case SVETypeFlags::EltTyBool16:
8531   case SVETypeFlags::EltTyBool32:
8532   case SVETypeFlags::EltTyBool64:
8533     return Builder.getInt1Ty();
8534   }
8535 }
8536 
8537 // Return the llvm predicate vector type corresponding to the specified element
8538 // TypeFlags.
8539 llvm::ScalableVectorType *
8540 CodeGenFunction::getSVEPredType(const SVETypeFlags &TypeFlags) {
8541   switch (TypeFlags.getEltType()) {
8542   default: llvm_unreachable("Unhandled SVETypeFlag!");
8543 
8544   case SVETypeFlags::EltTyInt8:
8545     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8546   case SVETypeFlags::EltTyInt16:
8547     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8548   case SVETypeFlags::EltTyInt32:
8549     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8550   case SVETypeFlags::EltTyInt64:
8551     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8552 
8553   case SVETypeFlags::EltTyBFloat16:
8554     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8555   case SVETypeFlags::EltTyFloat16:
8556     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8557   case SVETypeFlags::EltTyFloat32:
8558     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8559   case SVETypeFlags::EltTyFloat64:
8560     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8561 
8562   case SVETypeFlags::EltTyBool8:
8563     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8564   case SVETypeFlags::EltTyBool16:
8565     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8566   case SVETypeFlags::EltTyBool32:
8567     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8568   case SVETypeFlags::EltTyBool64:
8569     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8570   }
8571 }
8572 
8573 // Return the llvm vector type corresponding to the specified element TypeFlags.
8574 llvm::ScalableVectorType *
8575 CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
8576   switch (TypeFlags.getEltType()) {
8577   default:
8578     llvm_unreachable("Invalid SVETypeFlag!");
8579 
8580   case SVETypeFlags::EltTyInt8:
8581     return llvm::ScalableVectorType::get(Builder.getInt8Ty(), 16);
8582   case SVETypeFlags::EltTyInt16:
8583     return llvm::ScalableVectorType::get(Builder.getInt16Ty(), 8);
8584   case SVETypeFlags::EltTyInt32:
8585     return llvm::ScalableVectorType::get(Builder.getInt32Ty(), 4);
8586   case SVETypeFlags::EltTyInt64:
8587     return llvm::ScalableVectorType::get(Builder.getInt64Ty(), 2);
8588 
8589   case SVETypeFlags::EltTyFloat16:
8590     return llvm::ScalableVectorType::get(Builder.getHalfTy(), 8);
8591   case SVETypeFlags::EltTyBFloat16:
8592     return llvm::ScalableVectorType::get(Builder.getBFloatTy(), 8);
8593   case SVETypeFlags::EltTyFloat32:
8594     return llvm::ScalableVectorType::get(Builder.getFloatTy(), 4);
8595   case SVETypeFlags::EltTyFloat64:
8596     return llvm::ScalableVectorType::get(Builder.getDoubleTy(), 2);
8597 
8598   case SVETypeFlags::EltTyBool8:
8599     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
8600   case SVETypeFlags::EltTyBool16:
8601     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 8);
8602   case SVETypeFlags::EltTyBool32:
8603     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 4);
8604   case SVETypeFlags::EltTyBool64:
8605     return llvm::ScalableVectorType::get(Builder.getInt1Ty(), 2);
8606   }
8607 }
8608 
8609 llvm::Value *
8610 CodeGenFunction::EmitSVEAllTruePred(const SVETypeFlags &TypeFlags) {
8611   Function *Ptrue =
8612       CGM.getIntrinsic(Intrinsic::aarch64_sve_ptrue, getSVEPredType(TypeFlags));
8613   return Builder.CreateCall(Ptrue, {Builder.getInt32(/*SV_ALL*/ 31)});
8614 }
8615 
8616 constexpr unsigned SVEBitsPerBlock = 128;
8617 
8618 static llvm::ScalableVectorType *getSVEVectorForElementType(llvm::Type *EltTy) {
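  // One SVE block is 128 bits, so e.g. i32 yields <vscale x 4 x i32> and
  // double yields <vscale x 2 x double>.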
8619   unsigned NumElts = SVEBitsPerBlock / EltTy->getScalarSizeInBits();
8620   return llvm::ScalableVectorType::get(EltTy, NumElts);
8621 }
8622 
8623 // Reinterpret the input predicate so that it can be used to correctly isolate
8624 // the elements of the specified datatype.
8625 Value *CodeGenFunction::EmitSVEPredicateCast(Value *Pred,
8626                                              llvm::ScalableVectorType *VTy) {
8627   auto *RTy = llvm::VectorType::get(IntegerType::get(getLLVMContext(), 1), VTy);
8628   if (Pred->getType() == RTy)
8629     return Pred;
8630 
8631   unsigned IntID;
8632   llvm::Type *IntrinsicTy;
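  // Casting down to fewer (2/4/8) lanes goes through convert.from.svbool,
  // while casting a narrower predicate back up to the 16-lane svbool_t goes
  // through convert.to.svbool.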
8633   switch (VTy->getMinNumElements()) {
8634   default:
8635     llvm_unreachable("unsupported element count!");
8636   case 2:
8637   case 4:
8638   case 8:
8639     IntID = Intrinsic::aarch64_sve_convert_from_svbool;
8640     IntrinsicTy = RTy;
8641     break;
8642   case 16:
8643     IntID = Intrinsic::aarch64_sve_convert_to_svbool;
8644     IntrinsicTy = Pred->getType();
8645     break;
8646   }
8647 
8648   Function *F = CGM.getIntrinsic(IntID, IntrinsicTy);
8649   Value *C = Builder.CreateCall(F, Pred);
8650   assert(C->getType() == RTy && "Unexpected return type!");
8651   return C;
8652 }
8653 
8654 Value *CodeGenFunction::EmitSVEGatherLoad(const SVETypeFlags &TypeFlags,
8655                                           SmallVectorImpl<Value *> &Ops,
8656                                           unsigned IntID) {
8657   auto *ResultTy = getSVEType(TypeFlags);
8658   auto *OverloadedTy =
8659       llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), ResultTy);
8660 
8661   // At the ACLE level there's only one predicate type, svbool_t, which is
8662   // mapped to <n x 16 x i1>. However, this might be incompatible with the
  // actual type being loaded. For example, when loading 64-bit data the
  // predicate should be <n x 2 x i1> instead. At the IR level the type of
8665   // the predicate and the data being loaded must match. Cast accordingly.
8666   Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
8667 
8668   Function *F = nullptr;
8669   if (Ops[1]->getType()->isVectorTy())
8670     // This is the "vector base, scalar offset" case. In order to uniquely
8671     // map this built-in to an LLVM IR intrinsic, we need both the return type
8672     // and the type of the vector base.
8673     F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[1]->getType()});
8674   else
    // This is the "scalar base, vector offset" case. The type of the offset
8676     // is encoded in the name of the intrinsic. We only need to specify the
8677     // return type in order to uniquely map this built-in to an LLVM IR
8678     // intrinsic.
8679     F = CGM.getIntrinsic(IntID, OverloadedTy);
8680 
8681   // Pass 0 when the offset is missing. This can only be applied when using
8682   // the "vector base" addressing mode for which ACLE allows no offset. The
8683   // corresponding LLVM IR always requires an offset.
8684   if (Ops.size() == 2) {
8685     assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8686     Ops.push_back(ConstantInt::get(Int64Ty, 0));
8687   }
8688 
8689   // For "vector base, scalar index" scale the index so that it becomes a
8690   // scalar offset.
8691   if (!TypeFlags.isByteIndexed() && Ops[1]->getType()->isVectorTy()) {
8692     unsigned BytesPerElt =
8693         OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
8694     Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8695     Ops[2] = Builder.CreateMul(Ops[2], Scale);
8696   }
8697 
8698   Value *Call = Builder.CreateCall(F, Ops);
8699 
8700   // The following sext/zext is only needed when ResultTy != OverloadedTy. In
8701   // other cases it's folded into a nop.
8702   return TypeFlags.isZExtReturn() ? Builder.CreateZExt(Call, ResultTy)
8703                                   : Builder.CreateSExt(Call, ResultTy);
8704 }
8705 
8706 Value *CodeGenFunction::EmitSVEScatterStore(const SVETypeFlags &TypeFlags,
8707                                             SmallVectorImpl<Value *> &Ops,
8708                                             unsigned IntID) {
8709   auto *SrcDataTy = getSVEType(TypeFlags);
8710   auto *OverloadedTy =
8711       llvm::ScalableVectorType::get(SVEBuiltinMemEltTy(TypeFlags), SrcDataTy);
8712 
8713   // In ACLE the source data is passed in the last argument, whereas in LLVM IR
8714   // it's the first argument. Move it accordingly.
8715   Ops.insert(Ops.begin(), Ops.pop_back_val());
8716 
8717   Function *F = nullptr;
8718   if (Ops[2]->getType()->isVectorTy())
8719     // This is the "vector base, scalar offset" case. In order to uniquely
8720     // map this built-in to an LLVM IR intrinsic, we need both the return type
8721     // and the type of the vector base.
8722     F = CGM.getIntrinsic(IntID, {OverloadedTy, Ops[2]->getType()});
8723   else
    // This is the "scalar base, vector offset" case. The type of the offset
8725     // is encoded in the name of the intrinsic. We only need to specify the
8726     // return type in order to uniquely map this built-in to an LLVM IR
8727     // intrinsic.
8728     F = CGM.getIntrinsic(IntID, OverloadedTy);
8729 
8730   // Pass 0 when the offset is missing. This can only be applied when using
8731   // the "vector base" addressing mode for which ACLE allows no offset. The
8732   // corresponding LLVM IR always requires an offset.
8733   if (Ops.size() == 3) {
8734     assert(Ops[1]->getType()->isVectorTy() && "Scalar base requires an offset");
8735     Ops.push_back(ConstantInt::get(Int64Ty, 0));
8736   }
8737 
8738   // Truncation is needed when SrcDataTy != OverloadedTy. In other cases it's
8739   // folded into a nop.
8740   Ops[0] = Builder.CreateTrunc(Ops[0], OverloadedTy);
8741 
8742   // At the ACLE level there's only one predicate type, svbool_t, which is
8743   // mapped to <n x 16 x i1>. However, this might be incompatible with the
  // actual type being stored. For example, when storing 64-bit data the
  // predicate should be <n x 2 x i1> instead. At the IR level the type of
8746   // the predicate and the data being stored must match. Cast accordingly.
8747   Ops[1] = EmitSVEPredicateCast(Ops[1], OverloadedTy);
8748 
8749   // For "vector base, scalar index" scale the index so that it becomes a
8750   // scalar offset.
8751   if (!TypeFlags.isByteIndexed() && Ops[2]->getType()->isVectorTy()) {
8752     unsigned BytesPerElt =
8753         OverloadedTy->getElementType()->getScalarSizeInBits() / 8;
8754     Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8755     Ops[3] = Builder.CreateMul(Ops[3], Scale);
8756   }
8757 
8758   return Builder.CreateCall(F, Ops);
8759 }
8760 
8761 Value *CodeGenFunction::EmitSVEGatherPrefetch(const SVETypeFlags &TypeFlags,
8762                                               SmallVectorImpl<Value *> &Ops,
8763                                               unsigned IntID) {
8764   // The gather prefetches are overloaded on the vector input - this can either
  // be the vector of base addresses or the vector of offsets.
8766   auto *OverloadedTy = dyn_cast<llvm::ScalableVectorType>(Ops[1]->getType());
8767   if (!OverloadedTy)
8768     OverloadedTy = cast<llvm::ScalableVectorType>(Ops[2]->getType());
8769 
8770   // Cast the predicate from svbool_t to the right number of elements.
8771   Ops[0] = EmitSVEPredicateCast(Ops[0], OverloadedTy);
8772 
8773   // vector + imm addressing modes
8774   if (Ops[1]->getType()->isVectorTy()) {
8775     if (Ops.size() == 3) {
8776       // Pass 0 for 'vector+imm' when the index is omitted.
8777       Ops.push_back(ConstantInt::get(Int64Ty, 0));
8778 
8779       // The sv_prfop is the last operand in the builtin and IR intrinsic.
8780       std::swap(Ops[2], Ops[3]);
8781     } else {
8782       // Index needs to be passed as scaled offset.
8783       llvm::Type *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8784       unsigned BytesPerElt = MemEltTy->getPrimitiveSizeInBits() / 8;
8785       Value *Scale = ConstantInt::get(Int64Ty, BytesPerElt);
8786       Ops[2] = Builder.CreateMul(Ops[2], Scale);
8787     }
8788   }
8789 
8790   Function *F = CGM.getIntrinsic(IntID, OverloadedTy);
8791   return Builder.CreateCall(F, Ops);
8792 }
8793 
8794 Value *CodeGenFunction::EmitSVEStructLoad(const SVETypeFlags &TypeFlags,
8795                                           SmallVectorImpl<Value*> &Ops,
8796                                           unsigned IntID) {
8797   llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8798   auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8799   auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8800 
8801   unsigned N;
8802   switch (IntID) {
8803   case Intrinsic::aarch64_sve_ld2:
8804     N = 2;
8805     break;
8806   case Intrinsic::aarch64_sve_ld3:
8807     N = 3;
8808     break;
8809   case Intrinsic::aarch64_sve_ld4:
8810     N = 4;
8811     break;
8812   default:
8813     llvm_unreachable("unknown intrinsic!");
8814   }
8815   auto RetTy = llvm::VectorType::get(VTy->getElementType(),
8816                                      VTy->getElementCount() * N);
8817 
  Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
  Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8820   Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8821   BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8822   BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8823 
8824   Function *F = CGM.getIntrinsic(IntID, {RetTy, Predicate->getType()});
8825   return Builder.CreateCall(F, { Predicate, BasePtr });
8826 }
8827 
8828 Value *CodeGenFunction::EmitSVEStructStore(const SVETypeFlags &TypeFlags,
8829                                            SmallVectorImpl<Value*> &Ops,
8830                                            unsigned IntID) {
8831   llvm::ScalableVectorType *VTy = getSVEType(TypeFlags);
8832   auto VecPtrTy = llvm::PointerType::getUnqual(VTy);
8833   auto EltPtrTy = llvm::PointerType::getUnqual(VTy->getElementType());
8834 
8835   unsigned N;
8836   switch (IntID) {
8837   case Intrinsic::aarch64_sve_st2:
8838     N = 2;
8839     break;
8840   case Intrinsic::aarch64_sve_st3:
8841     N = 3;
8842     break;
8843   case Intrinsic::aarch64_sve_st4:
8844     N = 4;
8845     break;
8846   default:
8847     llvm_unreachable("unknown intrinsic!");
8848   }
8849   auto TupleTy =
8850       llvm::VectorType::get(VTy->getElementType(), VTy->getElementCount() * N);
8851 
8852   Value *Predicate = EmitSVEPredicateCast(Ops[0], VTy);
8853   Value *BasePtr = Builder.CreateBitCast(Ops[1], VecPtrTy);
8854   Value *Offset = Ops.size() > 3 ? Ops[2] : Builder.getInt32(0);
8855   Value *Val = Ops.back();
8856   BasePtr = Builder.CreateGEP(VTy, BasePtr, Offset);
8857   BasePtr = Builder.CreateBitCast(BasePtr, EltPtrTy);
8858 
8859   // The llvm.aarch64.sve.st2/3/4 intrinsics take legal part vectors, so we
8860   // need to break up the tuple vector.
8861   SmallVector<llvm::Value*, 5> Operands;
8862   Function *FExtr =
8863       CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
8864   for (unsigned I = 0; I < N; ++I)
8865     Operands.push_back(Builder.CreateCall(FExtr, {Val, Builder.getInt32(I)}));
8866   Operands.append({Predicate, BasePtr});
8867 
8868   Function *F = CGM.getIntrinsic(IntID, { VTy });
8869   return Builder.CreateCall(F, Operands);
8870 }
8871 
8872 // SVE2's svpmullb and svpmullt builtins are similar to the svpmullb_pair and
8873 // svpmullt_pair intrinsics, with the exception that their results are bitcast
8874 // to a wider type.
8875 Value *CodeGenFunction::EmitSVEPMull(const SVETypeFlags &TypeFlags,
8876                                      SmallVectorImpl<Value *> &Ops,
8877                                      unsigned BuiltinID) {
8878   // Splat scalar operand to vector (intrinsics with _n infix)
8879   if (TypeFlags.hasSplatOperand()) {
8880     unsigned OpNo = TypeFlags.getSplatOperand();
8881     Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
8882   }
8883 
8884   // The pair-wise function has a narrower overloaded type.
8885   Function *F = CGM.getIntrinsic(BuiltinID, Ops[0]->getType());
8886   Value *Call = Builder.CreateCall(F, {Ops[0], Ops[1]});
8887 
8888   // Now bitcast to the wider result type.
8889   llvm::ScalableVectorType *Ty = getSVEType(TypeFlags);
8890   return EmitSVEReinterpret(Call, Ty);
8891 }
8892 
8893 Value *CodeGenFunction::EmitSVEMovl(const SVETypeFlags &TypeFlags,
8894                                     ArrayRef<Value *> Ops, unsigned BuiltinID) {
8895   llvm::Type *OverloadedTy = getSVEType(TypeFlags);
8896   Function *F = CGM.getIntrinsic(BuiltinID, OverloadedTy);
8897   return Builder.CreateCall(F, {Ops[0], Builder.getInt32(0)});
8898 }
8899 
8900 Value *CodeGenFunction::EmitSVEPrefetchLoad(const SVETypeFlags &TypeFlags,
8901                                             SmallVectorImpl<Value *> &Ops,
8902                                             unsigned BuiltinID) {
8903   auto *MemEltTy = SVEBuiltinMemEltTy(TypeFlags);
8904   auto *VectorTy = getSVEVectorForElementType(MemEltTy);
8905   auto *MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8906 
8907   Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8908   Value *BasePtr = Ops[1];
8909 
  // Apply the index operand if it was not omitted.
8911   if (Ops.size() > 3) {
8912     BasePtr = Builder.CreateBitCast(BasePtr, MemoryTy->getPointerTo());
8913     BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Ops[2]);
8914   }
8915 
  // Prefetch intrinsics always expect an i8*.
8917   BasePtr = Builder.CreateBitCast(BasePtr, llvm::PointerType::getUnqual(Int8Ty));
8918   Value *PrfOp = Ops.back();
8919 
8920   Function *F = CGM.getIntrinsic(BuiltinID, Predicate->getType());
8921   return Builder.CreateCall(F, {Predicate, BasePtr, PrfOp});
8922 }
8923 
8924 Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
8925                                           llvm::Type *ReturnTy,
8926                                           SmallVectorImpl<Value *> &Ops,
8927                                           unsigned BuiltinID,
8928                                           bool IsZExtReturn) {
8929   QualType LangPTy = E->getArg(1)->getType();
8930   llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8931       LangPTy->castAs<PointerType>()->getPointeeType());
8932 
8933   // The vector type that is returned may be different from the
8934   // eventual type loaded from memory.
8935   auto VectorTy = cast<llvm::ScalableVectorType>(ReturnTy);
8936   auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8937 
8938   Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8939   Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8940   Value *Offset = Ops.size() > 2 ? Ops[2] : Builder.getInt32(0);
8941   BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8942 
8943   BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8944   Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8945   Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
8946 
  return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
                      : Builder.CreateSExt(Load, VectorTy);
8949 }
8950 
8951 Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
8952                                            SmallVectorImpl<Value *> &Ops,
8953                                            unsigned BuiltinID) {
8954   QualType LangPTy = E->getArg(1)->getType();
8955   llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
8956       LangPTy->castAs<PointerType>()->getPointeeType());
8957 
8958   // The vector type that is stored may be different from the
8959   // eventual type stored to memory.
8960   auto VectorTy = cast<llvm::ScalableVectorType>(Ops.back()->getType());
8961   auto MemoryTy = llvm::ScalableVectorType::get(MemEltTy, VectorTy);
8962 
8963   Value *Predicate = EmitSVEPredicateCast(Ops[0], MemoryTy);
8964   Value *BasePtr = Builder.CreateBitCast(Ops[1], MemoryTy->getPointerTo());
8965   Value *Offset = Ops.size() == 4 ? Ops[2] : Builder.getInt32(0);
8966   BasePtr = Builder.CreateGEP(MemoryTy, BasePtr, Offset);
8967 
  // The last value is always the data.
8969   llvm::Value *Val = Builder.CreateTrunc(Ops.back(), MemoryTy);
8970 
8971   BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
8972   Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
8973   return Builder.CreateCall(F, {Val, Predicate, BasePtr});
8974 }
8975 
// Limit the amount of scalable-vector LLVM IR generated by the ACLE by using
// the SVE dup.x intrinsic instead of IRBuilder::CreateVectorSplat.
8978 Value *CodeGenFunction::EmitSVEDupX(Value *Scalar, llvm::Type *Ty) {
8979   auto F = CGM.getIntrinsic(Intrinsic::aarch64_sve_dup_x, Ty);
8980   return Builder.CreateCall(F, Scalar);
8981 }
8982 
8983 Value *CodeGenFunction::EmitSVEDupX(Value* Scalar) {
8984   return EmitSVEDupX(Scalar, getSVEVectorForElementType(Scalar->getType()));
8985 }
8986 
8987 Value *CodeGenFunction::EmitSVEReinterpret(Value *Val, llvm::Type *Ty) {
8988   // FIXME: For big endian this needs an additional REV, or needs a separate
8989   // intrinsic that is code-generated as a no-op, because the LLVM bitcast
  // instruction is defined as bitwise equivalent from a memory point of
  // view (when storing/reloading), whereas the svreinterpret builtin
  // implements a bitwise-equivalent cast from a register point of view.
8993   // LLVM CodeGen for a bitcast must add an explicit REV for big-endian.
8994   return Builder.CreateBitCast(Val, Ty);
8995 }
8996 
8997 static void InsertExplicitZeroOperand(CGBuilderTy &Builder, llvm::Type *Ty,
8998                                       SmallVectorImpl<Value *> &Ops) {
8999   auto *SplatZero = Constant::getNullValue(Ty);
9000   Ops.insert(Ops.begin(), SplatZero);
9001 }
9002 
9003 static void InsertExplicitUndefOperand(CGBuilderTy &Builder, llvm::Type *Ty,
9004                                        SmallVectorImpl<Value *> &Ops) {
9005   auto *SplatUndef = UndefValue::get(Ty);
9006   Ops.insert(Ops.begin(), SplatUndef);
9007 }
9008 
9009 SmallVector<llvm::Type *, 2>
9010 CodeGenFunction::getSVEOverloadTypes(const SVETypeFlags &TypeFlags,
9011                                      llvm::Type *ResultType,
9012                                      ArrayRef<Value *> Ops) {
9013   if (TypeFlags.isOverloadNone())
9014     return {};
9015 
9016   llvm::Type *DefaultType = getSVEType(TypeFlags);
9017 
9018   if (TypeFlags.isOverloadWhile())
9019     return {DefaultType, Ops[1]->getType()};
9020 
9021   if (TypeFlags.isOverloadWhileRW())
9022     return {getSVEPredType(TypeFlags), Ops[0]->getType()};
9023 
9024   if (TypeFlags.isOverloadCvt() || TypeFlags.isTupleSet())
9025     return {Ops[0]->getType(), Ops.back()->getType()};
9026 
9027   if (TypeFlags.isTupleCreate() || TypeFlags.isTupleGet())
9028     return {ResultType, Ops[0]->getType()};
9029 
9030   assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
9031   return {DefaultType};
9032 }
9033 
9034 Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
9035                                                   const CallExpr *E) {
9036   // Find out if any arguments are required to be integer constant expressions.
9037   unsigned ICEArguments = 0;
9038   ASTContext::GetBuiltinTypeError Error;
9039   getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
9040   assert(Error == ASTContext::GE_None && "Should not codegen an error");
9041 
9042   llvm::Type *Ty = ConvertType(E->getType());
9043   if (BuiltinID >= SVE::BI__builtin_sve_reinterpret_s8_s8 &&
9044       BuiltinID <= SVE::BI__builtin_sve_reinterpret_f64_f64) {
9045     Value *Val = EmitScalarExpr(E->getArg(0));
9046     return EmitSVEReinterpret(Val, Ty);
9047   }
9048 
9049   llvm::SmallVector<Value *, 4> Ops;
9050   for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
9051     if ((ICEArguments & (1 << i)) == 0)
9052       Ops.push_back(EmitScalarExpr(E->getArg(i)));
9053     else {
9054       // If this is required to be a constant, constant fold it so that we know
9055       // that the generated intrinsic gets a ConstantInt.
9056       Optional<llvm::APSInt> Result =
9057           E->getArg(i)->getIntegerConstantExpr(getContext());
9058       assert(Result && "Expected argument to be a constant");
9059 
      // Immediates for SVE LLVM intrinsics are always 32 bits. We can safely
9061       // truncate because the immediate has been range checked and no valid
9062       // immediate requires more than a handful of bits.
9063       *Result = Result->extOrTrunc(32);
9064       Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), *Result));
9065     }
9066   }
9067 
9068   auto *Builtin = findARMVectorIntrinsicInMap(AArch64SVEIntrinsicMap, BuiltinID,
9069                                               AArch64SVEIntrinsicsProvenSorted);
9070   SVETypeFlags TypeFlags(Builtin->TypeModifier);
9071   if (TypeFlags.isLoad())
9072     return EmitSVEMaskedLoad(E, Ty, Ops, Builtin->LLVMIntrinsic,
9073                              TypeFlags.isZExtReturn());
9074   else if (TypeFlags.isStore())
9075     return EmitSVEMaskedStore(E, Ops, Builtin->LLVMIntrinsic);
9076   else if (TypeFlags.isGatherLoad())
9077     return EmitSVEGatherLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9078   else if (TypeFlags.isScatterStore())
9079     return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9080   else if (TypeFlags.isPrefetch())
9081     return EmitSVEPrefetchLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9082   else if (TypeFlags.isGatherPrefetch())
9083     return EmitSVEGatherPrefetch(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isStructLoad())
    return EmitSVEStructLoad(TypeFlags, Ops, Builtin->LLVMIntrinsic);
  else if (TypeFlags.isStructStore())
    return EmitSVEStructStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
9088   else if (TypeFlags.isUndef())
9089     return UndefValue::get(Ty);
9090   else if (Builtin->LLVMIntrinsic != 0) {
9091     if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
9092       InsertExplicitZeroOperand(Builder, Ty, Ops);
9093 
9094     if (TypeFlags.getMergeType() == SVETypeFlags::MergeAnyExp)
9095       InsertExplicitUndefOperand(Builder, Ty, Ops);
9096 
9097     // Some ACLE builtins leave out the argument to specify the predicate
9098     // pattern, which is expected to be expanded to an SV_ALL pattern.
9099     if (TypeFlags.isAppendSVALL())
9100       Ops.push_back(Builder.getInt32(/*SV_ALL*/ 31));
9101     if (TypeFlags.isInsertOp1SVALL())
9102       Ops.insert(&Ops[1], Builder.getInt32(/*SV_ALL*/ 31));
9103 
9104     // Predicates must match the main datatype.
9105     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
9106       if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
9107         if (PredTy->getElementType()->isIntegerTy(1))
9108           Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
9109 
9110     // Splat scalar operand to vector (intrinsics with _n infix)
9111     if (TypeFlags.hasSplatOperand()) {
9112       unsigned OpNo = TypeFlags.getSplatOperand();
9113       Ops[OpNo] = EmitSVEDupX(Ops[OpNo]);
9114     }
9115 
9116     if (TypeFlags.isReverseCompare())
9117       std::swap(Ops[1], Ops[2]);
9118 
9119     if (TypeFlags.isReverseUSDOT())
9120       std::swap(Ops[1], Ops[2]);
9121 
9122     // Predicated intrinsics with _z suffix need a select w/ zeroinitializer.
9123     if (TypeFlags.getMergeType() == SVETypeFlags::MergeZero) {
9124       llvm::Type *OpndTy = Ops[1]->getType();
9125       auto *SplatZero = Constant::getNullValue(OpndTy);
9126       Function *Sel = CGM.getIntrinsic(Intrinsic::aarch64_sve_sel, OpndTy);
9127       Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
9128     }
9129 
9130     Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
9131                                    getSVEOverloadTypes(TypeFlags, Ty, Ops));
9132     Value *Call = Builder.CreateCall(F, Ops);
9133 
9134     // Predicate results must be converted to svbool_t.
9135     if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
9136       if (PredTy->getScalarType()->isIntegerTy(1))
9137         Call = EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
9138 
9139     return Call;
9140   }
9141 
9142   switch (BuiltinID) {
9143   default:
9144     return nullptr;
9145 
9146   case SVE::BI__builtin_sve_svmov_b_z: {
9147     // svmov_b_z(pg, op) <=> svand_b_z(pg, op, op)
9148     SVETypeFlags TypeFlags(Builtin->TypeModifier);
9149     llvm::Type* OverloadedTy = getSVEType(TypeFlags);
9150     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_and_z, OverloadedTy);
9151     return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[1]});
9152   }
9153 
9154   case SVE::BI__builtin_sve_svnot_b_z: {
9155     // svnot_b_z(pg, op) <=> sveor_b_z(pg, op, pg)
9156     SVETypeFlags TypeFlags(Builtin->TypeModifier);
9157     llvm::Type* OverloadedTy = getSVEType(TypeFlags);
9158     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_eor_z, OverloadedTy);
9159     return Builder.CreateCall(F, {Ops[0], Ops[1], Ops[0]});
9160   }
9161 
9162   case SVE::BI__builtin_sve_svmovlb_u16:
9163   case SVE::BI__builtin_sve_svmovlb_u32:
9164   case SVE::BI__builtin_sve_svmovlb_u64:
9165     return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllb);
9166 
9167   case SVE::BI__builtin_sve_svmovlb_s16:
9168   case SVE::BI__builtin_sve_svmovlb_s32:
9169   case SVE::BI__builtin_sve_svmovlb_s64:
9170     return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllb);
9171 
9172   case SVE::BI__builtin_sve_svmovlt_u16:
9173   case SVE::BI__builtin_sve_svmovlt_u32:
9174   case SVE::BI__builtin_sve_svmovlt_u64:
9175     return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_ushllt);
9176 
9177   case SVE::BI__builtin_sve_svmovlt_s16:
9178   case SVE::BI__builtin_sve_svmovlt_s32:
9179   case SVE::BI__builtin_sve_svmovlt_s64:
9180     return EmitSVEMovl(TypeFlags, Ops, Intrinsic::aarch64_sve_sshllt);
9181 
9182   case SVE::BI__builtin_sve_svpmullt_u16:
9183   case SVE::BI__builtin_sve_svpmullt_u64:
9184   case SVE::BI__builtin_sve_svpmullt_n_u16:
9185   case SVE::BI__builtin_sve_svpmullt_n_u64:
9186     return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullt_pair);
9187 
9188   case SVE::BI__builtin_sve_svpmullb_u16:
9189   case SVE::BI__builtin_sve_svpmullb_u64:
9190   case SVE::BI__builtin_sve_svpmullb_n_u16:
9191   case SVE::BI__builtin_sve_svpmullb_n_u64:
9192     return EmitSVEPMull(TypeFlags, Ops, Intrinsic::aarch64_sve_pmullb_pair);
9193 
9194   case SVE::BI__builtin_sve_svdup_n_b8:
9195   case SVE::BI__builtin_sve_svdup_n_b16:
9196   case SVE::BI__builtin_sve_svdup_n_b32:
9197   case SVE::BI__builtin_sve_svdup_n_b64: {
9198     Value *CmpNE =
9199         Builder.CreateICmpNE(Ops[0], Constant::getNullValue(Ops[0]->getType()));
9200     llvm::ScalableVectorType *OverloadedTy = getSVEType(TypeFlags);
9201     Value *Dup = EmitSVEDupX(CmpNE, OverloadedTy);
9202     return EmitSVEPredicateCast(Dup, cast<llvm::ScalableVectorType>(Ty));
9203   }
9204 
9205   case SVE::BI__builtin_sve_svdupq_n_b8:
9206   case SVE::BI__builtin_sve_svdupq_n_b16:
9207   case SVE::BI__builtin_sve_svdupq_n_b32:
9208   case SVE::BI__builtin_sve_svdupq_n_b64:
9209   case SVE::BI__builtin_sve_svdupq_n_u8:
9210   case SVE::BI__builtin_sve_svdupq_n_s8:
9211   case SVE::BI__builtin_sve_svdupq_n_u64:
9212   case SVE::BI__builtin_sve_svdupq_n_f64:
9213   case SVE::BI__builtin_sve_svdupq_n_s64:
9214   case SVE::BI__builtin_sve_svdupq_n_u16:
9215   case SVE::BI__builtin_sve_svdupq_n_f16:
9216   case SVE::BI__builtin_sve_svdupq_n_bf16:
9217   case SVE::BI__builtin_sve_svdupq_n_s16:
9218   case SVE::BI__builtin_sve_svdupq_n_u32:
9219   case SVE::BI__builtin_sve_svdupq_n_f32:
9220   case SVE::BI__builtin_sve_svdupq_n_s32: {
    // These builtins are lowered by building a fixed-length vector from the
    // operands, inserting it into the low 128 bits of an undef scalable
    // vector, and splatting that 128-bit chunk with dupq_lane.
9223     unsigned NumOpnds = Ops.size();
9224 
9225     bool IsBoolTy =
9226         cast<llvm::VectorType>(Ty)->getElementType()->isIntegerTy(1);
9227 
    // For svdupq_n_b* the element type is an integer of width 128/numelts,
9229     // so that the compare can use the width that is natural for the expected
9230     // number of predicate lanes.
9231     llvm::Type *EltTy = Ops[0]->getType();
9232     if (IsBoolTy)
9233       EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
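    // E.g. svdupq_n_b16 takes eight operands, so EltTy becomes i16 and the
    // cmpne below operates on <vscale x 8 x i16> lanes.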
9234 
9235     SmallVector<llvm::Value *, 16> VecOps;
9236     for (unsigned I = 0; I < NumOpnds; ++I)
      VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
9238     Value *Vec = BuildVector(VecOps);
9239 
9240     SVETypeFlags TypeFlags(Builtin->TypeModifier);
9241     Value *Pred = EmitSVEAllTruePred(TypeFlags);
9242 
9243     llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
9244     Value *InsertSubVec = Builder.CreateInsertVector(
9245         OverloadedTy, UndefValue::get(OverloadedTy), Vec, Builder.getInt64(0));
9246 
9247     Function *F =
9248         CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
9249     Value *DupQLane =
9250         Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)});
9251 
9252     if (!IsBoolTy)
9253       return DupQLane;
9254 
9255     // For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
9256     F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
9257                                        : Intrinsic::aarch64_sve_cmpne_wide,
9258                          OverloadedTy);
9259     Value *Call = Builder.CreateCall(
9260         F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))});
9261     return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
9262   }
9263 
9264   case SVE::BI__builtin_sve_svpfalse_b:
9265     return ConstantInt::getFalse(Ty);
9266 
9267   case SVE::BI__builtin_sve_svlen_bf16:
9268   case SVE::BI__builtin_sve_svlen_f16:
9269   case SVE::BI__builtin_sve_svlen_f32:
9270   case SVE::BI__builtin_sve_svlen_f64:
9271   case SVE::BI__builtin_sve_svlen_s8:
9272   case SVE::BI__builtin_sve_svlen_s16:
9273   case SVE::BI__builtin_sve_svlen_s32:
9274   case SVE::BI__builtin_sve_svlen_s64:
9275   case SVE::BI__builtin_sve_svlen_u8:
9276   case SVE::BI__builtin_sve_svlen_u16:
9277   case SVE::BI__builtin_sve_svlen_u32:
9278   case SVE::BI__builtin_sve_svlen_u64: {
9279     SVETypeFlags TF(Builtin->TypeModifier);
9280     auto VTy = cast<llvm::VectorType>(getSVEType(TF));
9281     auto *NumEls =
9282         llvm::ConstantInt::get(Ty, VTy->getElementCount().getKnownMinValue());
9283 
9284     Function *F = CGM.getIntrinsic(Intrinsic::vscale, Ty);
9285     return Builder.CreateMul(NumEls, Builder.CreateCall(F));
9286   }
9287 
9288   case SVE::BI__builtin_sve_svtbl2_u8:
9289   case SVE::BI__builtin_sve_svtbl2_s8:
9290   case SVE::BI__builtin_sve_svtbl2_u16:
9291   case SVE::BI__builtin_sve_svtbl2_s16:
9292   case SVE::BI__builtin_sve_svtbl2_u32:
9293   case SVE::BI__builtin_sve_svtbl2_s32:
9294   case SVE::BI__builtin_sve_svtbl2_u64:
9295   case SVE::BI__builtin_sve_svtbl2_s64:
9296   case SVE::BI__builtin_sve_svtbl2_f16:
9297   case SVE::BI__builtin_sve_svtbl2_bf16:
9298   case SVE::BI__builtin_sve_svtbl2_f32:
9299   case SVE::BI__builtin_sve_svtbl2_f64: {
9300     SVETypeFlags TF(Builtin->TypeModifier);
9301     auto VTy = cast<llvm::VectorType>(getSVEType(TF));
9302     auto TupleTy = llvm::VectorType::getDoubleElementsVectorType(VTy);
9303     Function *FExtr =
9304         CGM.getIntrinsic(Intrinsic::aarch64_sve_tuple_get, {VTy, TupleTy});
9305     Value *V0 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(0)});
9306     Value *V1 = Builder.CreateCall(FExtr, {Ops[0], Builder.getInt32(1)});
9307     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_tbl2, VTy);
9308     return Builder.CreateCall(F, {V0, V1, Ops[1]});
9309   }
9310   }
9311 
  // Should not happen.
9313   return nullptr;
9314 }
9315 
9316 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
9317                                                const CallExpr *E,
9318                                                llvm::Triple::ArchType Arch) {
9319   if (BuiltinID >= AArch64::FirstSVEBuiltin &&
9320       BuiltinID <= AArch64::LastSVEBuiltin)
9321     return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
9322 
9323   unsigned HintID = static_cast<unsigned>(-1);
9324   switch (BuiltinID) {
9325   default: break;
9326   case AArch64::BI__builtin_arm_nop:
9327     HintID = 0;
9328     break;
9329   case AArch64::BI__builtin_arm_yield:
9330   case AArch64::BI__yield:
9331     HintID = 1;
9332     break;
9333   case AArch64::BI__builtin_arm_wfe:
9334   case AArch64::BI__wfe:
9335     HintID = 2;
9336     break;
9337   case AArch64::BI__builtin_arm_wfi:
9338   case AArch64::BI__wfi:
9339     HintID = 3;
9340     break;
9341   case AArch64::BI__builtin_arm_sev:
9342   case AArch64::BI__sev:
9343     HintID = 4;
9344     break;
9345   case AArch64::BI__builtin_arm_sevl:
9346   case AArch64::BI__sevl:
9347     HintID = 5;
9348     break;
9349   }
9350 
9351   if (HintID != static_cast<unsigned>(-1)) {
9352     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
9353     return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
9354   }
9355 
9356   if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
9357     Value *Address         = EmitScalarExpr(E->getArg(0));
9358     Value *RW              = EmitScalarExpr(E->getArg(1));
9359     Value *CacheLevel      = EmitScalarExpr(E->getArg(2));
9360     Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
9361     Value *IsData          = EmitScalarExpr(E->getArg(4));
9362 
9363     Value *Locality = nullptr;
9364     if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
      // Temporal fetch: convert the cache level to an llvm.prefetch locality.
9366       Locality = llvm::ConstantInt::get(Int32Ty,
9367         -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
9368     } else {
9369       // Streaming fetch.
9370       Locality = llvm::ConstantInt::get(Int32Ty, 0);
9371     }
9372 
9373     // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
9374     // PLDL3STRM or PLDL2STRM.
9375     Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
9376     return Builder.CreateCall(F, {Address, RW, Locality, IsData});
9377   }
9378 
9379   if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
9380     assert((getContext().getTypeSize(E->getType()) == 32) &&
9381            "rbit of unusual size!");
9382     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9383     return Builder.CreateCall(
9384         CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
9385   }
9386   if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
9387     assert((getContext().getTypeSize(E->getType()) == 64) &&
9388            "rbit of unusual size!");
9389     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9390     return Builder.CreateCall(
9391         CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
9392   }
9393 
9394   if (BuiltinID == AArch64::BI__builtin_arm_cls) {
9395     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9396     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
9397                               "cls");
9398   }
9399   if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
9400     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9401     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
9402                               "cls");
9403   }
9404 
9405   if (BuiltinID == AArch64::BI__builtin_arm_frint32zf ||
9406       BuiltinID == AArch64::BI__builtin_arm_frint32z) {
9407     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9408     llvm::Type *Ty = Arg->getType();
9409     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
9410                               Arg, "frint32z");
9411   }
9412 
9413   if (BuiltinID == AArch64::BI__builtin_arm_frint64zf ||
9414       BuiltinID == AArch64::BI__builtin_arm_frint64z) {
9415     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9416     llvm::Type *Ty = Arg->getType();
9417     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
9418                               Arg, "frint64z");
9419   }
9420 
9421   if (BuiltinID == AArch64::BI__builtin_arm_frint32xf ||
9422       BuiltinID == AArch64::BI__builtin_arm_frint32x) {
9423     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9424     llvm::Type *Ty = Arg->getType();
9425     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
9426                               Arg, "frint32x");
9427   }
9428 
9429   if (BuiltinID == AArch64::BI__builtin_arm_frint64xf ||
9430       BuiltinID == AArch64::BI__builtin_arm_frint64x) {
9431     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9432     llvm::Type *Ty = Arg->getType();
9433     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
9434                               Arg, "frint64x");
9435   }
9436 
9437   if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
9438     assert((getContext().getTypeSize(E->getType()) == 32) &&
9439            "__jcvt of unusual size!");
9440     llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
9441     return Builder.CreateCall(
9442         CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
9443   }
9444 
9445   if (BuiltinID == AArch64::BI__builtin_arm_ld64b ||
9446       BuiltinID == AArch64::BI__builtin_arm_st64b ||
9447       BuiltinID == AArch64::BI__builtin_arm_st64bv ||
9448       BuiltinID == AArch64::BI__builtin_arm_st64bv0) {
9449     llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
9450     llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));
9451 
9452     if (BuiltinID == AArch64::BI__builtin_arm_ld64b) {
9453       // Load from the address via an LLVM intrinsic, receiving a
9454       // tuple of 8 i64 words, and store each one to ValPtr.
9455       Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
9456       llvm::Value *Val = Builder.CreateCall(F, MemAddr);
      llvm::Value *ToRet = nullptr;
9458       for (size_t i = 0; i < 8; i++) {
9459         llvm::Value *ValOffsetPtr =
9460             Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
9461         Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
9462         ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
9463       }
9464       return ToRet;
9465     } else {
9466       // Load 8 i64 words from ValPtr, and store them to the address
9467       // via an LLVM intrinsic.
9468       SmallVector<llvm::Value *, 9> Args;
9469       Args.push_back(MemAddr);
9470       for (size_t i = 0; i < 8; i++) {
9471         llvm::Value *ValOffsetPtr =
9472             Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
9473         Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
9474         Args.push_back(Builder.CreateLoad(Addr));
9475       }
9476 
9477       auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b
9478                        ? Intrinsic::aarch64_st64b
9479                        : BuiltinID == AArch64::BI__builtin_arm_st64bv
9480                              ? Intrinsic::aarch64_st64bv
9481                              : Intrinsic::aarch64_st64bv0);
9482       Function *F = CGM.getIntrinsic(Intr);
9483       return Builder.CreateCall(F, Args);
9484     }
9485   }
9486 
9487   if (BuiltinID == AArch64::BI__builtin_arm_rndr ||
9488       BuiltinID == AArch64::BI__builtin_arm_rndrrs) {
9489 
9490     auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr
9491                      ? Intrinsic::aarch64_rndr
9492                      : Intrinsic::aarch64_rndrrs);
9493     Function *F = CGM.getIntrinsic(Intr);
9494     llvm::Value *Val = Builder.CreateCall(F);
9495     Value *RandomValue = Builder.CreateExtractValue(Val, 0);
9496     Value *Status = Builder.CreateExtractValue(Val, 1);
9497 
9498     Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
9499     Builder.CreateStore(RandomValue, MemAddress);
9500     Status = Builder.CreateZExt(Status, Int32Ty);
9501     return Status;
9502   }
9503 
9504   if (BuiltinID == AArch64::BI__clear_cache) {
9505     assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
9506     const FunctionDecl *FD = E->getDirectCallee();
9507     Value *Ops[2];
9508     for (unsigned i = 0; i < 2; i++)
9509       Ops[i] = EmitScalarExpr(E->getArg(i));
9510     llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
9511     llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
9512     StringRef Name = FD->getName();
9513     return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
9514   }
9515 
9516   if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
       BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
9518       getContext().getTypeSize(E->getType()) == 128) {
9519     Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
9520                                        ? Intrinsic::aarch64_ldaxp
9521                                        : Intrinsic::aarch64_ldxp);
9522 
9523     Value *LdPtr = EmitScalarExpr(E->getArg(0));
9524     Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
9525                                     "ldxp");
9526 
9527     Value *Val0 = Builder.CreateExtractValue(Val, 1);
9528     Value *Val1 = Builder.CreateExtractValue(Val, 0);
9529     llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
9530     Val0 = Builder.CreateZExt(Val0, Int128Ty);
9531     Val1 = Builder.CreateZExt(Val1, Int128Ty);
9532 
9533     Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
9534     Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
9535     Val = Builder.CreateOr(Val, Val1);
9536     return Builder.CreateBitCast(Val, ConvertType(E->getType()));
9537   } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
9538              BuiltinID == AArch64::BI__builtin_arm_ldaex) {
9539     Value *LoadAddr = EmitScalarExpr(E->getArg(0));
9540 
9541     QualType Ty = E->getType();
9542     llvm::Type *RealResTy = ConvertType(Ty);
9543     llvm::Type *PtrTy = llvm::IntegerType::get(
9544         getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
9545     LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
9546 
9547     Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
9548                                        ? Intrinsic::aarch64_ldaxr
9549                                        : Intrinsic::aarch64_ldxr,
9550                                    PtrTy);
9551     Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
9552 
9553     if (RealResTy->isPointerTy())
9554       return Builder.CreateIntToPtr(Val, RealResTy);
9555 
9556     llvm::Type *IntResTy = llvm::IntegerType::get(
9557         getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
9558     Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
9559     return Builder.CreateBitCast(Val, RealResTy);
9560   }
9561 
9562   if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
9563        BuiltinID == AArch64::BI__builtin_arm_stlex) &&
9564       getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
9565     Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
9566                                        ? Intrinsic::aarch64_stlxp
9567                                        : Intrinsic::aarch64_stxp);
9568     llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
9569 
9570     Address Tmp = CreateMemTemp(E->getArg(0)->getType());
9571     EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
9572 
9573     Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
9574     llvm::Value *Val = Builder.CreateLoad(Tmp);
9575 
9576     Value *Arg0 = Builder.CreateExtractValue(Val, 0);
9577     Value *Arg1 = Builder.CreateExtractValue(Val, 1);
9578     Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
9579                                          Int8PtrTy);
9580     return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
9581   }
9582 
9583   if (BuiltinID == AArch64::BI__builtin_arm_strex ||
9584       BuiltinID == AArch64::BI__builtin_arm_stlex) {
9585     Value *StoreVal = EmitScalarExpr(E->getArg(0));
9586     Value *StoreAddr = EmitScalarExpr(E->getArg(1));
9587 
9588     QualType Ty = E->getArg(0)->getType();
9589     llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
9590                                                  getContext().getTypeSize(Ty));
9591     StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
9592 
9593     if (StoreVal->getType()->isPointerTy())
9594       StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
9595     else {
9596       llvm::Type *IntTy = llvm::IntegerType::get(
9597           getLLVMContext(),
9598           CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
9599       StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
9600       StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
9601     }
9602 
9603     Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
9604                                        ? Intrinsic::aarch64_stlxr
9605                                        : Intrinsic::aarch64_stxr,
9606                                    StoreAddr->getType());
9607     return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
9608   }
9609 
9610   if (BuiltinID == AArch64::BI__getReg) {
9611     Expr::EvalResult Result;
9612     if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
9613       llvm_unreachable("Sema will ensure that the parameter is constant");
9614 
9615     llvm::APSInt Value = Result.Val.getInt();
9616     LLVMContext &Context = CGM.getLLVMContext();
9617     std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);
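    // E.g. __getReg(2) reads "x2"; register number 31 reads "sp".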
9618 
9619     llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
9620     llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
9621     llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
9622 
9623     llvm::Function *F =
9624         CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
9625     return Builder.CreateCall(F, Metadata);
9626   }
9627 
9628   if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
9629     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
9630     return Builder.CreateCall(F);
9631   }
9632 
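  // _ReadWriteBarrier is only a compiler barrier, so a single-thread fence
  // suffices; no hardware barrier instruction is emitted.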
9633   if (BuiltinID == AArch64::BI_ReadWriteBarrier)
9634     return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
9635                                llvm::SyncScope::SingleThread);
9636 
9637   // CRC32
9638   Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
9639   switch (BuiltinID) {
9640   case AArch64::BI__builtin_arm_crc32b:
9641     CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
9642   case AArch64::BI__builtin_arm_crc32cb:
9643     CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
9644   case AArch64::BI__builtin_arm_crc32h:
9645     CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
9646   case AArch64::BI__builtin_arm_crc32ch:
9647     CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
9648   case AArch64::BI__builtin_arm_crc32w:
9649     CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
9650   case AArch64::BI__builtin_arm_crc32cw:
9651     CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
9652   case AArch64::BI__builtin_arm_crc32d:
9653     CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
9654   case AArch64::BI__builtin_arm_crc32cd:
9655     CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
9656   }
9657 
9658   if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
9659     Value *Arg0 = EmitScalarExpr(E->getArg(0));
9660     Value *Arg1 = EmitScalarExpr(E->getArg(1));
9661     Function *F = CGM.getIntrinsic(CRCIntrinsicID);
9662 
9663     llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
9664     Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
9665 
9666     return Builder.CreateCall(F, {Arg0, Arg1});
9667   }
9668 
  // Memory Tagging Extension (MTE) intrinsics
9670   Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
9671   switch (BuiltinID) {
9672   case AArch64::BI__builtin_arm_irg:
9673     MTEIntrinsicID = Intrinsic::aarch64_irg; break;
  case AArch64::BI__builtin_arm_addg:
    MTEIntrinsicID = Intrinsic::aarch64_addg; break;
  case AArch64::BI__builtin_arm_gmi:
    MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
  case AArch64::BI__builtin_arm_ldg:
    MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
9680   case AArch64::BI__builtin_arm_stg:
9681     MTEIntrinsicID = Intrinsic::aarch64_stg; break;
9682   case AArch64::BI__builtin_arm_subp:
9683     MTEIntrinsicID = Intrinsic::aarch64_subp; break;
9684   }
9685 
9686   if (MTEIntrinsicID != Intrinsic::not_intrinsic) {
9687     llvm::Type *T = ConvertType(E->getType());
9688 
9689     if (MTEIntrinsicID == Intrinsic::aarch64_irg) {
9690       Value *Pointer = EmitScalarExpr(E->getArg(0));
9691       Value *Mask = EmitScalarExpr(E->getArg(1));
9692 
9693       Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9694       Mask = Builder.CreateZExt(Mask, Int64Ty);
      Value *RV = Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                                     {Pointer, Mask});
      return Builder.CreatePointerCast(RV, T);
9698     }
9699     if (MTEIntrinsicID == Intrinsic::aarch64_addg) {
9700       Value *Pointer = EmitScalarExpr(E->getArg(0));
9701       Value *TagOffset = EmitScalarExpr(E->getArg(1));
9702 
9703       Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9704       TagOffset = Builder.CreateZExt(TagOffset, Int64Ty);
9705       Value *RV = Builder.CreateCall(
9706                        CGM.getIntrinsic(MTEIntrinsicID), {Pointer, TagOffset});
9707       return Builder.CreatePointerCast(RV, T);
9708     }
9709     if (MTEIntrinsicID == Intrinsic::aarch64_gmi) {
9710       Value *Pointer = EmitScalarExpr(E->getArg(0));
9711       Value *ExcludedMask = EmitScalarExpr(E->getArg(1));
9712 
9713       ExcludedMask = Builder.CreateZExt(ExcludedMask, Int64Ty);
9714       Pointer = Builder.CreatePointerCast(Pointer, Int8PtrTy);
9715       return Builder.CreateCall(
9716                        CGM.getIntrinsic(MTEIntrinsicID), {Pointer, ExcludedMask});
9717     }
    // Although it is possible to supply a different return address (the
    // first argument) to this intrinsic, for now we set the return address
    // to be the same as the input address.
9721     if (MTEIntrinsicID == Intrinsic::aarch64_ldg) {
9722       Value *TagAddress = EmitScalarExpr(E->getArg(0));
9723       TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
9724       Value *RV = Builder.CreateCall(
9725                     CGM.getIntrinsic(MTEIntrinsicID), {TagAddress, TagAddress});
9726       return Builder.CreatePointerCast(RV, T);
9727     }
    // Although it is possible to supply a different tag to set (as the first
    // argument) to this intrinsic, for now we reuse the tag of the input
    // address argument, which is the common use case.
    if (MTEIntrinsicID == Intrinsic::aarch64_stg) {
      Value *TagAddress = EmitScalarExpr(E->getArg(0));
      TagAddress = Builder.CreatePointerCast(TagAddress, Int8PtrTy);
      return Builder.CreateCall(CGM.getIntrinsic(MTEIntrinsicID),
                                {TagAddress, TagAddress});
    }
9737     if (MTEIntrinsicID == Intrinsic::aarch64_subp) {
9738       Value *PointerA = EmitScalarExpr(E->getArg(0));
9739       Value *PointerB = EmitScalarExpr(E->getArg(1));
9740       PointerA = Builder.CreatePointerCast(PointerA, Int8PtrTy);
9741       PointerB = Builder.CreatePointerCast(PointerB, Int8PtrTy);
9742       return Builder.CreateCall(
9743                        CGM.getIntrinsic(MTEIntrinsicID), {PointerA, PointerB});
9744     }
9745   }
9746 
9747   if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9748       BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9749       BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9750       BuiltinID == AArch64::BI__builtin_arm_wsr ||
9751       BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
9752       BuiltinID == AArch64::BI__builtin_arm_wsrp) {
9753 
9754     SpecialRegisterAccessKind AccessKind = Write;
9755     if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
9756         BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
9757         BuiltinID == AArch64::BI__builtin_arm_rsrp)
9758       AccessKind = VolatileRead;
9759 
9760     bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
9761                             BuiltinID == AArch64::BI__builtin_arm_wsrp;
9762 
9763     bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
9764                    BuiltinID != AArch64::BI__builtin_arm_wsr;
9765 
9766     llvm::Type *ValueType;
9767     llvm::Type *RegisterType = Int64Ty;
9768     if (IsPointerBuiltin) {
9769       ValueType = VoidPtrTy;
9770     } else if (Is64Bit) {
9771       ValueType = Int64Ty;
9772     } else {
9773       ValueType = Int32Ty;
9774     }
9775 
9776     return EmitSpecialRegisterBuiltin(*this, E, RegisterType, ValueType,
9777                                       AccessKind);
9778   }
9779 
9780   if (BuiltinID == AArch64::BI_ReadStatusReg ||
9781       BuiltinID == AArch64::BI_WriteStatusReg) {
9782     LLVMContext &Context = CGM.getLLVMContext();
9783 
9784     unsigned SysReg =
9785       E->getArg(0)->EvaluateKnownConstInt(getContext()).getZExtValue();
9786 
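    // Decode the packed immediate into the "op0:op1:CRn:CRm:op2" string form
    // expected by the read/write_register intrinsics. op0 is always 2 or 3
    // for MRS/MSR, so only its low bit is encoded (in bit 14).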
9787     std::string SysRegStr;
9788     llvm::raw_string_ostream(SysRegStr) <<
9789                        ((1 << 1) | ((SysReg >> 14) & 1))  << ":" <<
9790                        ((SysReg >> 11) & 7)               << ":" <<
9791                        ((SysReg >> 7)  & 15)              << ":" <<
9792                        ((SysReg >> 3)  & 15)              << ":" <<
9793                        ( SysReg        & 7);
9794 
9795     llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysRegStr) };
9796     llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
9797     llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
9798 
9799     llvm::Type *RegisterType = Int64Ty;
9800     llvm::Type *Types[] = { RegisterType };
9801 
9802     if (BuiltinID == AArch64::BI_ReadStatusReg) {
9803       llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
9804 
9805       return Builder.CreateCall(F, Metadata);
9806     }
9807 
9808     llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::write_register, Types);
9809     llvm::Value *ArgValue = EmitScalarExpr(E->getArg(1));
9810 
9811     return Builder.CreateCall(F, { Metadata, ArgValue });
9812   }
9813 
9814   if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
9815     llvm::Function *F =
9816         CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
9817     return Builder.CreateCall(F);
9818   }
9819 
9820   if (BuiltinID == AArch64::BI__builtin_sponentry) {
9821     llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
9822     return Builder.CreateCall(F);
9823   }
9824 
9825   if (BuiltinID == AArch64::BI__mulh || BuiltinID == AArch64::BI__umulh) {
9826     llvm::Type *ResType = ConvertType(E->getType());
9827     llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
9828 
9829     bool IsSigned = BuiltinID == AArch64::BI__mulh;
9830     Value *LHS =
9831         Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned);
9832     Value *RHS =
9833         Builder.CreateIntCast(EmitScalarExpr(E->getArg(1)), Int128Ty, IsSigned);
9834 
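    // Widen both operands to 128 bits and multiply; the high 64 bits of the
    // product are the result. __mulh sign-extends, __umulh zero-extends.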
9835     Value *MulResult, *HigherBits;
9836     if (IsSigned) {
9837       MulResult = Builder.CreateNSWMul(LHS, RHS);
9838       HigherBits = Builder.CreateAShr(MulResult, 64);
9839     } else {
9840       MulResult = Builder.CreateNUWMul(LHS, RHS);
9841       HigherBits = Builder.CreateLShr(MulResult, 64);
9842     }
9843     HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
9844 
9845     return HigherBits;
9846   }
9847 
9848   // Handle MSVC intrinsics before argument evaluation to prevent double
9849   // evaluation.
9850   if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID))
9851     return EmitMSVCBuiltinExpr(*MsvcIntId, E);
9852 
9853   // Find out if any arguments are required to be integer constant
9854   // expressions.
9855   unsigned ICEArguments = 0;
9856   ASTContext::GetBuiltinTypeError Error;
9857   getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
9858   assert(Error == ASTContext::GE_None && "Should not codegen an error");
9859 
9860   llvm::SmallVector<Value*, 4> Ops;
9861   Address PtrOp0 = Address::invalid();
9862   for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
9863     if (i == 0) {
9864       switch (BuiltinID) {
9865       case NEON::BI__builtin_neon_vld1_v:
9866       case NEON::BI__builtin_neon_vld1q_v:
9867       case NEON::BI__builtin_neon_vld1_dup_v:
9868       case NEON::BI__builtin_neon_vld1q_dup_v:
9869       case NEON::BI__builtin_neon_vld1_lane_v:
9870       case NEON::BI__builtin_neon_vld1q_lane_v:
9871       case NEON::BI__builtin_neon_vst1_v:
9872       case NEON::BI__builtin_neon_vst1q_v:
9873       case NEON::BI__builtin_neon_vst1_lane_v:
9874       case NEON::BI__builtin_neon_vst1q_lane_v:
9875         // Get the alignment for the argument in addition to the value;
9876         // we'll use it later.
9877         PtrOp0 = EmitPointerWithAlignment(E->getArg(0));
9878         Ops.push_back(PtrOp0.getPointer());
9879         continue;
9880       }
9881     }
9882     if ((ICEArguments & (1 << i)) == 0) {
9883       Ops.push_back(EmitScalarExpr(E->getArg(i)));
9884     } else {
9885       // If this is required to be a constant, constant fold it so that we know
9886       // that the generated intrinsic gets a ConstantInt.
9887       Ops.push_back(llvm::ConstantInt::get(
9888           getLLVMContext(),
9889           *E->getArg(i)->getIntegerConstantExpr(getContext())));
9890     }
9891   }
9892 
9893   auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
9894   const ARMVectorIntrinsicInfo *Builtin = findARMVectorIntrinsicInMap(
9895       SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
9896 
9897   if (Builtin) {
9898     Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
9899     Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
9900     assert(Result && "SISD intrinsic should have been handled");
9901     return Result;
9902   }
9903 
9904   const Expr *Arg = E->getArg(E->getNumArgs()-1);
9905   NeonTypeFlags Type(0);
9906   if (Optional<llvm::APSInt> Result = Arg->getIntegerConstantExpr(getContext()))
9907     // Determine the type of this overloaded NEON intrinsic.
9908     Type = NeonTypeFlags(Result->getZExtValue());
9909 
9910   bool usgn = Type.isUnsigned();
9911   bool quad = Type.isQuad();
9912 
9913   // Handle non-overloaded intrinsics first.
9914   switch (BuiltinID) {
9915   default: break;
9916   case NEON::BI__builtin_neon_vabsh_f16:
9917     Ops.push_back(EmitScalarExpr(E->getArg(0)));
9918     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
9919   case NEON::BI__builtin_neon_vaddq_p128: {
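    // Addition of polynomials over GF(2) is carry-less, i.e. a plain XOR.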
9920     llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128);
9921     Ops.push_back(EmitScalarExpr(E->getArg(1)));
9922     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
9923     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
    Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
9925     llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
9926     return Builder.CreateBitCast(Ops[0], Int128Ty);
9927   }
9928   case NEON::BI__builtin_neon_vldrq_p128: {
9929     llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
9930     llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
9931     Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
9932     return Builder.CreateAlignedLoad(Int128Ty, Ptr,
9933                                      CharUnits::fromQuantity(16));
9934   }
9935   case NEON::BI__builtin_neon_vstrq_p128: {
9936     llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
9937     Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
9938     return Builder.CreateDefaultAlignedStore(EmitScalarExpr(E->getArg(1)), Ptr);
9939   }
9940   case NEON::BI__builtin_neon_vcvts_f32_u32:
9941   case NEON::BI__builtin_neon_vcvtd_f64_u64:
9942     usgn = true;
9943     LLVM_FALLTHROUGH;
9944   case NEON::BI__builtin_neon_vcvts_f32_s32:
9945   case NEON::BI__builtin_neon_vcvtd_f64_s64: {
9946     Ops.push_back(EmitScalarExpr(E->getArg(0)));
9947     bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
9948     llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
9949     llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
9950     Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9951     if (usgn)
9952       return Builder.CreateUIToFP(Ops[0], FTy);
9953     return Builder.CreateSIToFP(Ops[0], FTy);
9954   }
9955   case NEON::BI__builtin_neon_vcvth_f16_u16:
9956   case NEON::BI__builtin_neon_vcvth_f16_u32:
9957   case NEON::BI__builtin_neon_vcvth_f16_u64:
9958     usgn = true;
9959     LLVM_FALLTHROUGH;
9960   case NEON::BI__builtin_neon_vcvth_f16_s16:
9961   case NEON::BI__builtin_neon_vcvth_f16_s32:
9962   case NEON::BI__builtin_neon_vcvth_f16_s64: {
9963     Ops.push_back(EmitScalarExpr(E->getArg(0)));
9964     llvm::Type *FTy = HalfTy;
9965     llvm::Type *InTy;
9966     if (Ops[0]->getType()->getPrimitiveSizeInBits() == 64)
9967       InTy = Int64Ty;
9968     else if (Ops[0]->getType()->getPrimitiveSizeInBits() == 32)
9969       InTy = Int32Ty;
9970     else
9971       InTy = Int16Ty;
9972     Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
9973     if (usgn)
9974       return Builder.CreateUIToFP(Ops[0], FTy);
9975     return Builder.CreateSIToFP(Ops[0], FTy);
9976   }
9977   case NEON::BI__builtin_neon_vcvtah_u16_f16:
9978   case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9979   case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9980   case NEON::BI__builtin_neon_vcvtph_u16_f16:
9981   case NEON::BI__builtin_neon_vcvth_u16_f16:
9982   case NEON::BI__builtin_neon_vcvtah_s16_f16:
9983   case NEON::BI__builtin_neon_vcvtmh_s16_f16:
9984   case NEON::BI__builtin_neon_vcvtnh_s16_f16:
9985   case NEON::BI__builtin_neon_vcvtph_s16_f16:
9986   case NEON::BI__builtin_neon_vcvth_s16_f16: {
9987     unsigned Int;
9988     llvm::Type* InTy = Int32Ty;
9989     llvm::Type* FTy  = HalfTy;
9990     llvm::Type *Tys[2] = {InTy, FTy};
9991     Ops.push_back(EmitScalarExpr(E->getArg(0)));
9992     switch (BuiltinID) {
9993     default: llvm_unreachable("missing builtin ID in switch!");
9994     case NEON::BI__builtin_neon_vcvtah_u16_f16:
9995       Int = Intrinsic::aarch64_neon_fcvtau; break;
9996     case NEON::BI__builtin_neon_vcvtmh_u16_f16:
9997       Int = Intrinsic::aarch64_neon_fcvtmu; break;
9998     case NEON::BI__builtin_neon_vcvtnh_u16_f16:
9999       Int = Intrinsic::aarch64_neon_fcvtnu; break;
10000     case NEON::BI__builtin_neon_vcvtph_u16_f16:
10001       Int = Intrinsic::aarch64_neon_fcvtpu; break;
10002     case NEON::BI__builtin_neon_vcvth_u16_f16:
10003       Int = Intrinsic::aarch64_neon_fcvtzu; break;
10004     case NEON::BI__builtin_neon_vcvtah_s16_f16:
10005       Int = Intrinsic::aarch64_neon_fcvtas; break;
10006     case NEON::BI__builtin_neon_vcvtmh_s16_f16:
10007       Int = Intrinsic::aarch64_neon_fcvtms; break;
10008     case NEON::BI__builtin_neon_vcvtnh_s16_f16:
10009       Int = Intrinsic::aarch64_neon_fcvtns; break;
10010     case NEON::BI__builtin_neon_vcvtph_s16_f16:
10011       Int = Intrinsic::aarch64_neon_fcvtps; break;
10012     case NEON::BI__builtin_neon_vcvth_s16_f16:
10013       Int = Intrinsic::aarch64_neon_fcvtzs; break;
10014     }
10015     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvt");
10016     return Builder.CreateTrunc(Ops[0], Int16Ty);
10017   }
10018   case NEON::BI__builtin_neon_vcaleh_f16:
10019   case NEON::BI__builtin_neon_vcalth_f16:
10020   case NEON::BI__builtin_neon_vcageh_f16:
10021   case NEON::BI__builtin_neon_vcagth_f16: {
10022     unsigned Int;
10023     llvm::Type* InTy = Int32Ty;
10024     llvm::Type* FTy  = HalfTy;
10025     llvm::Type *Tys[2] = {InTy, FTy};
10026     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10027     switch (BuiltinID) {
10028     default: llvm_unreachable("missing builtin ID in switch!");
10029     case NEON::BI__builtin_neon_vcageh_f16:
10030       Int = Intrinsic::aarch64_neon_facge; break;
10031     case NEON::BI__builtin_neon_vcagth_f16:
10032       Int = Intrinsic::aarch64_neon_facgt; break;
10033     case NEON::BI__builtin_neon_vcaleh_f16:
10034       Int = Intrinsic::aarch64_neon_facge; std::swap(Ops[0], Ops[1]); break;
10035     case NEON::BI__builtin_neon_vcalth_f16:
10036       Int = Intrinsic::aarch64_neon_facgt; std::swap(Ops[0], Ops[1]); break;
10037     }
10038     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "facg");
10039     return Builder.CreateTrunc(Ops[0], Int16Ty);
10040   }
10041   case NEON::BI__builtin_neon_vcvth_n_s16_f16:
10042   case NEON::BI__builtin_neon_vcvth_n_u16_f16: {
10043     unsigned Int;
10044     llvm::Type* InTy = Int32Ty;
10045     llvm::Type* FTy  = HalfTy;
10046     llvm::Type *Tys[2] = {InTy, FTy};
10047     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10048     switch (BuiltinID) {
10049     default: llvm_unreachable("missing builtin ID in switch!");
10050     case NEON::BI__builtin_neon_vcvth_n_s16_f16:
10051       Int = Intrinsic::aarch64_neon_vcvtfp2fxs; break;
10052     case NEON::BI__builtin_neon_vcvth_n_u16_f16:
10053       Int = Intrinsic::aarch64_neon_vcvtfp2fxu; break;
10054     }
10055     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
10056     return Builder.CreateTrunc(Ops[0], Int16Ty);
10057   }
10058   case NEON::BI__builtin_neon_vcvth_n_f16_s16:
10059   case NEON::BI__builtin_neon_vcvth_n_f16_u16: {
10060     unsigned Int;
10061     llvm::Type* FTy  = HalfTy;
10062     llvm::Type* InTy = Int32Ty;
10063     llvm::Type *Tys[2] = {FTy, InTy};
10064     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10065     switch (BuiltinID) {
10066     default: llvm_unreachable("missing builtin ID in switch!");
10067     case NEON::BI__builtin_neon_vcvth_n_f16_s16:
10068       Int = Intrinsic::aarch64_neon_vcvtfxs2fp;
10069       Ops[0] = Builder.CreateSExt(Ops[0], InTy, "sext");
10070       break;
10071     case NEON::BI__builtin_neon_vcvth_n_f16_u16:
10072       Int = Intrinsic::aarch64_neon_vcvtfxu2fp;
10073       Ops[0] = Builder.CreateZExt(Ops[0], InTy);
10074       break;
10075     }
10076     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "fcvth_n");
10077   }
10078   case NEON::BI__builtin_neon_vpaddd_s64: {
10079     auto *Ty = llvm::FixedVectorType::get(Int64Ty, 2);
10080     Value *Vec = EmitScalarExpr(E->getArg(0));
    // The vector is v2i64, so make sure it's bitcast to that.
10082     Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
10083     llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
10084     llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
10085     Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
10086     Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
    // Pairwise addition of a v2i64 into a scalar i64.
10088     return Builder.CreateAdd(Op0, Op1, "vpaddd");
10089   }
10090   case NEON::BI__builtin_neon_vpaddd_f64: {
10091     auto *Ty = llvm::FixedVectorType::get(DoubleTy, 2);
10092     Value *Vec = EmitScalarExpr(E->getArg(0));
10093     // The vector is v2f64, so make sure it's bitcast to that.
10094     Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
10095     llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
10096     llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
10097     Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
10098     Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
10099     // Pairwise addition of a v2f64 into a scalar f64.
10100     return Builder.CreateFAdd(Op0, Op1, "vpaddd");
10101   }
10102   case NEON::BI__builtin_neon_vpadds_f32: {
10103     auto *Ty = llvm::FixedVectorType::get(FloatTy, 2);
10104     Value *Vec = EmitScalarExpr(E->getArg(0));
10105     // The vector is v2f32, so make sure it's bitcast to that.
10106     Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
10107     llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
10108     llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
10109     Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
10110     Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
10111     // Pairwise addition of a v2f32 into a scalar f32.
    return Builder.CreateFAdd(Op0, Op1, "vpadds");
10113   }
10114   case NEON::BI__builtin_neon_vceqzd_s64:
10115   case NEON::BI__builtin_neon_vceqzd_f64:
10116   case NEON::BI__builtin_neon_vceqzs_f32:
10117   case NEON::BI__builtin_neon_vceqzh_f16:
10118     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10119     return EmitAArch64CompareBuiltinExpr(
10120         Ops[0], ConvertType(E->getCallReturnType(getContext())),
10121         ICmpInst::FCMP_OEQ, ICmpInst::ICMP_EQ, "vceqz");
10122   case NEON::BI__builtin_neon_vcgezd_s64:
10123   case NEON::BI__builtin_neon_vcgezd_f64:
10124   case NEON::BI__builtin_neon_vcgezs_f32:
10125   case NEON::BI__builtin_neon_vcgezh_f16:
10126     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10127     return EmitAArch64CompareBuiltinExpr(
10128         Ops[0], ConvertType(E->getCallReturnType(getContext())),
10129         ICmpInst::FCMP_OGE, ICmpInst::ICMP_SGE, "vcgez");
10130   case NEON::BI__builtin_neon_vclezd_s64:
10131   case NEON::BI__builtin_neon_vclezd_f64:
10132   case NEON::BI__builtin_neon_vclezs_f32:
10133   case NEON::BI__builtin_neon_vclezh_f16:
10134     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10135     return EmitAArch64CompareBuiltinExpr(
10136         Ops[0], ConvertType(E->getCallReturnType(getContext())),
10137         ICmpInst::FCMP_OLE, ICmpInst::ICMP_SLE, "vclez");
10138   case NEON::BI__builtin_neon_vcgtzd_s64:
10139   case NEON::BI__builtin_neon_vcgtzd_f64:
10140   case NEON::BI__builtin_neon_vcgtzs_f32:
10141   case NEON::BI__builtin_neon_vcgtzh_f16:
10142     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10143     return EmitAArch64CompareBuiltinExpr(
10144         Ops[0], ConvertType(E->getCallReturnType(getContext())),
10145         ICmpInst::FCMP_OGT, ICmpInst::ICMP_SGT, "vcgtz");
10146   case NEON::BI__builtin_neon_vcltzd_s64:
10147   case NEON::BI__builtin_neon_vcltzd_f64:
10148   case NEON::BI__builtin_neon_vcltzs_f32:
10149   case NEON::BI__builtin_neon_vcltzh_f16:
10150     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10151     return EmitAArch64CompareBuiltinExpr(
10152         Ops[0], ConvertType(E->getCallReturnType(getContext())),
10153         ICmpInst::FCMP_OLT, ICmpInst::ICMP_SLT, "vcltz");
10154 
10155   case NEON::BI__builtin_neon_vceqzd_u64: {
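    // Scalar compare builtins return an all-ones or all-zeros mask, hence
    // the sign-extension of the i1 compare result below.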
10156     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10157     Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
10158     Ops[0] =
10159         Builder.CreateICmpEQ(Ops[0], llvm::Constant::getNullValue(Int64Ty));
10160     return Builder.CreateSExt(Ops[0], Int64Ty, "vceqzd");
10161   }
10162   case NEON::BI__builtin_neon_vceqd_f64:
10163   case NEON::BI__builtin_neon_vcled_f64:
10164   case NEON::BI__builtin_neon_vcltd_f64:
10165   case NEON::BI__builtin_neon_vcged_f64:
10166   case NEON::BI__builtin_neon_vcgtd_f64: {
10167     llvm::CmpInst::Predicate P;
10168     switch (BuiltinID) {
10169     default: llvm_unreachable("missing builtin ID in switch!");
10170     case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
10171     case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
10172     case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
10173     case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
10174     case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
10175     }
10176     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10177     Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10178     Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
10179     Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
10180     return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
10181   }
10182   case NEON::BI__builtin_neon_vceqs_f32:
10183   case NEON::BI__builtin_neon_vcles_f32:
10184   case NEON::BI__builtin_neon_vclts_f32:
10185   case NEON::BI__builtin_neon_vcges_f32:
10186   case NEON::BI__builtin_neon_vcgts_f32: {
10187     llvm::CmpInst::Predicate P;
10188     switch (BuiltinID) {
10189     default: llvm_unreachable("missing builtin ID in switch!");
10190     case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
10191     case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
10192     case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
10193     case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
10194     case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
10195     }
10196     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10197     Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
10198     Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
10199     Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
10200     return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
10201   }
10202   case NEON::BI__builtin_neon_vceqh_f16:
10203   case NEON::BI__builtin_neon_vcleh_f16:
10204   case NEON::BI__builtin_neon_vclth_f16:
10205   case NEON::BI__builtin_neon_vcgeh_f16:
10206   case NEON::BI__builtin_neon_vcgth_f16: {
10207     llvm::CmpInst::Predicate P;
10208     switch (BuiltinID) {
10209     default: llvm_unreachable("missing builtin ID in switch!");
10210     case NEON::BI__builtin_neon_vceqh_f16: P = llvm::FCmpInst::FCMP_OEQ; break;
10211     case NEON::BI__builtin_neon_vcleh_f16: P = llvm::FCmpInst::FCMP_OLE; break;
10212     case NEON::BI__builtin_neon_vclth_f16: P = llvm::FCmpInst::FCMP_OLT; break;
10213     case NEON::BI__builtin_neon_vcgeh_f16: P = llvm::FCmpInst::FCMP_OGE; break;
10214     case NEON::BI__builtin_neon_vcgth_f16: P = llvm::FCmpInst::FCMP_OGT; break;
10215     }
10216     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10217     Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
10218     Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
10219     Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
10220     return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
10221   }
10222   case NEON::BI__builtin_neon_vceqd_s64:
10223   case NEON::BI__builtin_neon_vceqd_u64:
10224   case NEON::BI__builtin_neon_vcgtd_s64:
10225   case NEON::BI__builtin_neon_vcgtd_u64:
10226   case NEON::BI__builtin_neon_vcltd_s64:
10227   case NEON::BI__builtin_neon_vcltd_u64:
10228   case NEON::BI__builtin_neon_vcged_u64:
10229   case NEON::BI__builtin_neon_vcged_s64:
10230   case NEON::BI__builtin_neon_vcled_u64:
10231   case NEON::BI__builtin_neon_vcled_s64: {
10232     llvm::CmpInst::Predicate P;
10233     switch (BuiltinID) {
10234     default: llvm_unreachable("missing builtin ID in switch!");
    case NEON::BI__builtin_neon_vceqd_s64:
    case NEON::BI__builtin_neon_vceqd_u64: P = ICmpInst::ICMP_EQ; break;
    case NEON::BI__builtin_neon_vcgtd_s64: P = ICmpInst::ICMP_SGT; break;
    case NEON::BI__builtin_neon_vcgtd_u64: P = ICmpInst::ICMP_UGT; break;
    case NEON::BI__builtin_neon_vcltd_s64: P = ICmpInst::ICMP_SLT; break;
    case NEON::BI__builtin_neon_vcltd_u64: P = ICmpInst::ICMP_ULT; break;
    case NEON::BI__builtin_neon_vcged_u64: P = ICmpInst::ICMP_UGE; break;
    case NEON::BI__builtin_neon_vcged_s64: P = ICmpInst::ICMP_SGE; break;
    case NEON::BI__builtin_neon_vcled_u64: P = ICmpInst::ICMP_ULE; break;
    case NEON::BI__builtin_neon_vcled_s64: P = ICmpInst::ICMP_SLE; break;
10245     }
10246     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10247     Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
10248     Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
10249     Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
10250     return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
10251   }
10252   case NEON::BI__builtin_neon_vtstd_s64:
10253   case NEON::BI__builtin_neon_vtstd_u64: {
10254     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10255     Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
10256     Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
10257     Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
10258     Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
10259                                 llvm::Constant::getNullValue(Int64Ty));
10260     return Builder.CreateSExt(Ops[0], Int64Ty, "vtstd");
10261   }
10262   case NEON::BI__builtin_neon_vset_lane_i8:
10263   case NEON::BI__builtin_neon_vset_lane_i16:
10264   case NEON::BI__builtin_neon_vset_lane_i32:
10265   case NEON::BI__builtin_neon_vset_lane_i64:
10266   case NEON::BI__builtin_neon_vset_lane_bf16:
10267   case NEON::BI__builtin_neon_vset_lane_f32:
10268   case NEON::BI__builtin_neon_vsetq_lane_i8:
10269   case NEON::BI__builtin_neon_vsetq_lane_i16:
10270   case NEON::BI__builtin_neon_vsetq_lane_i32:
10271   case NEON::BI__builtin_neon_vsetq_lane_i64:
10272   case NEON::BI__builtin_neon_vsetq_lane_bf16:
10273   case NEON::BI__builtin_neon_vsetq_lane_f32:
10274     Ops.push_back(EmitScalarExpr(E->getArg(2)));
10275     return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
10276   case NEON::BI__builtin_neon_vset_lane_f64:
10277     // The vector type needs a cast for the v1f64 variant.
10278     Ops[1] =
10279         Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 1));
10280     Ops.push_back(EmitScalarExpr(E->getArg(2)));
10281     return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
10282   case NEON::BI__builtin_neon_vsetq_lane_f64:
10283     // The vector type needs a cast for the v2f64 variant.
10284     Ops[1] =
10285         Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(DoubleTy, 2));
10286     Ops.push_back(EmitScalarExpr(E->getArg(2)));
10287     return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
10288 
10289   case NEON::BI__builtin_neon_vget_lane_i8:
10290   case NEON::BI__builtin_neon_vdupb_lane_i8:
10291     Ops[0] =
10292         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 8));
10293     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10294                                         "vget_lane");
10295   case NEON::BI__builtin_neon_vgetq_lane_i8:
10296   case NEON::BI__builtin_neon_vdupb_laneq_i8:
10297     Ops[0] =
10298         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int8Ty, 16));
10299     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10300                                         "vgetq_lane");
10301   case NEON::BI__builtin_neon_vget_lane_i16:
10302   case NEON::BI__builtin_neon_vduph_lane_i16:
10303     Ops[0] =
10304         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 4));
10305     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10306                                         "vget_lane");
10307   case NEON::BI__builtin_neon_vgetq_lane_i16:
10308   case NEON::BI__builtin_neon_vduph_laneq_i16:
10309     Ops[0] =
10310         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int16Ty, 8));
10311     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10312                                         "vgetq_lane");
10313   case NEON::BI__builtin_neon_vget_lane_i32:
10314   case NEON::BI__builtin_neon_vdups_lane_i32:
10315     Ops[0] =
10316         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 2));
10317     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10318                                         "vget_lane");
10319   case NEON::BI__builtin_neon_vdups_lane_f32:
10320     Ops[0] =
10321         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
10322     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10323                                         "vdups_lane");
10324   case NEON::BI__builtin_neon_vgetq_lane_i32:
10325   case NEON::BI__builtin_neon_vdups_laneq_i32:
10326     Ops[0] =
10327         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
10328     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10329                                         "vgetq_lane");
10330   case NEON::BI__builtin_neon_vget_lane_i64:
10331   case NEON::BI__builtin_neon_vdupd_lane_i64:
10332     Ops[0] =
10333         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 1));
10334     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10335                                         "vget_lane");
10336   case NEON::BI__builtin_neon_vdupd_lane_f64:
10337     Ops[0] =
10338         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
10339     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10340                                         "vdupd_lane");
10341   case NEON::BI__builtin_neon_vgetq_lane_i64:
10342   case NEON::BI__builtin_neon_vdupd_laneq_i64:
10343     Ops[0] =
10344         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
10345     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10346                                         "vgetq_lane");
10347   case NEON::BI__builtin_neon_vget_lane_f32:
10348     Ops[0] =
10349         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 2));
10350     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10351                                         "vget_lane");
10352   case NEON::BI__builtin_neon_vget_lane_f64:
10353     Ops[0] =
10354         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 1));
10355     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10356                                         "vget_lane");
10357   case NEON::BI__builtin_neon_vgetq_lane_f32:
10358   case NEON::BI__builtin_neon_vdups_laneq_f32:
10359     Ops[0] =
10360         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(FloatTy, 4));
10361     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10362                                         "vgetq_lane");
10363   case NEON::BI__builtin_neon_vgetq_lane_f64:
10364   case NEON::BI__builtin_neon_vdupd_laneq_f64:
10365     Ops[0] =
10366         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(DoubleTy, 2));
10367     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10368                                         "vgetq_lane");
10369   case NEON::BI__builtin_neon_vaddh_f16:
10370     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10371     return Builder.CreateFAdd(Ops[0], Ops[1], "vaddh");
10372   case NEON::BI__builtin_neon_vsubh_f16:
10373     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10374     return Builder.CreateFSub(Ops[0], Ops[1], "vsubh");
10375   case NEON::BI__builtin_neon_vmulh_f16:
10376     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10377     return Builder.CreateFMul(Ops[0], Ops[1], "vmulh");
10378   case NEON::BI__builtin_neon_vdivh_f16:
10379     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10380     return Builder.CreateFDiv(Ops[0], Ops[1], "vdivh");
10381   case NEON::BI__builtin_neon_vfmah_f16:
    // The NEON intrinsic puts the accumulator first, unlike the LLVM fma.
10383     return emitCallMaybeConstrainedFPBuiltin(
10384         *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
10385         {EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2)), Ops[0]});
10386   case NEON::BI__builtin_neon_vfmsh_f16: {
10387     // FIXME: This should be an fneg instruction:
10388     Value *Zero = llvm::ConstantFP::getZeroValueForNegation(HalfTy);
10389     Value* Sub = Builder.CreateFSub(Zero, EmitScalarExpr(E->getArg(1)), "vsubh");
10390 
    // The NEON intrinsic puts the accumulator first, unlike the LLVM fma.
10392     return emitCallMaybeConstrainedFPBuiltin(
10393         *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, HalfTy,
10394         {Sub, EmitScalarExpr(E->getArg(2)), Ops[0]});
10395   }
10396   case NEON::BI__builtin_neon_vaddd_s64:
10397   case NEON::BI__builtin_neon_vaddd_u64:
10398     return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
10399   case NEON::BI__builtin_neon_vsubd_s64:
10400   case NEON::BI__builtin_neon_vsubd_u64:
10401     return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
10402   case NEON::BI__builtin_neon_vqdmlalh_s16:
10403   case NEON::BI__builtin_neon_vqdmlslh_s16: {
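    // There is no scalar sqdmull: widen the i16 operands into v4i16 vectors,
    // take lane 0 of the v4i32 doubling-multiply-long result, and then
    // saturating add/subtract it into the accumulator.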
10404     SmallVector<Value *, 2> ProductOps;
10405     ProductOps.push_back(vectorWrapScalar16(Ops[1]));
10406     ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
10407     auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
10408     Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
10409                           ProductOps, "vqdmlXl");
10410     Constant *CI = ConstantInt::get(SizeTy, 0);
10411     Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
10412 
10413     unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
10414                                         ? Intrinsic::aarch64_neon_sqadd
10415                                         : Intrinsic::aarch64_neon_sqsub;
10416     return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
10417   }
10418   case NEON::BI__builtin_neon_vqshlud_n_s64: {
10419     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10420     Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
10421     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
10422                         Ops, "vqshlu_n");
10423   }
10424   case NEON::BI__builtin_neon_vqshld_n_u64:
10425   case NEON::BI__builtin_neon_vqshld_n_s64: {
10426     unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
10427                                    ? Intrinsic::aarch64_neon_uqshl
10428                                    : Intrinsic::aarch64_neon_sqshl;
10429     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10430     Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
10431     return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
10432   }
10433   case NEON::BI__builtin_neon_vrshrd_n_u64:
10434   case NEON::BI__builtin_neon_vrshrd_n_s64: {
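    // There is no scalar rounding-shift-right intrinsic, so use the rounding
    // shift left (urshl/srshl) with a negated shift amount instead.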
10435     unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
10436                                    ? Intrinsic::aarch64_neon_urshl
10437                                    : Intrinsic::aarch64_neon_srshl;
10438     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10439     int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
10440     Ops[1] = ConstantInt::get(Int64Ty, -SV);
10441     return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
10442   }
10443   case NEON::BI__builtin_neon_vrsrad_n_u64:
10444   case NEON::BI__builtin_neon_vrsrad_n_s64: {
10445     unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
10446                                    ? Intrinsic::aarch64_neon_urshl
10447                                    : Intrinsic::aarch64_neon_srshl;
10448     Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
10449     Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
10450     Ops[1] = Builder.CreateCall(CGM.getIntrinsic(Int, Int64Ty),
10451                                 {Ops[1], Builder.CreateSExt(Ops[2], Int64Ty)});
10452     return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
10453   }
10454   case NEON::BI__builtin_neon_vshld_n_s64:
10455   case NEON::BI__builtin_neon_vshld_n_u64: {
10456     llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10457     return Builder.CreateShl(
10458         Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
10459   }
10460   case NEON::BI__builtin_neon_vshrd_n_s64: {
10461     llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
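    // An ashr by 64 would be poison; clamping the shift amount to 63 yields
    // the same all-sign-bits result.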
10462     return Builder.CreateAShr(
10463         Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
10464                                                    Amt->getZExtValue())),
10465         "shrd_n");
10466   }
10467   case NEON::BI__builtin_neon_vshrd_n_u64: {
10468     llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
10469     uint64_t ShiftAmt = Amt->getZExtValue();
10470     // Right-shifting an unsigned value by its size yields 0.
10471     if (ShiftAmt == 64)
10472       return ConstantInt::get(Int64Ty, 0);
10473     return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
10474                               "shrd_n");
10475   }
10476   case NEON::BI__builtin_neon_vsrad_n_s64: {
10477     llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
10478     Ops[1] = Builder.CreateAShr(
10479         Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
10480                                                    Amt->getZExtValue())),
10481         "shrd_n");
10482     return Builder.CreateAdd(Ops[0], Ops[1]);
10483   }
10484   case NEON::BI__builtin_neon_vsrad_n_u64: {
10485     llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
10486     uint64_t ShiftAmt = Amt->getZExtValue();
10487     // Right-shifting an unsigned value by its size yields 0.
10488     // As Op + 0 = Op, return Ops[0] directly.
10489     if (ShiftAmt == 64)
10490       return Ops[0];
10491     Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
10492                                 "shrd_n");
10493     return Builder.CreateAdd(Ops[0], Ops[1]);
10494   }
10495   case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
10496   case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
10497   case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
10498   case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
10499     Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
10500                                           "lane");
10501     SmallVector<Value *, 2> ProductOps;
10502     ProductOps.push_back(vectorWrapScalar16(Ops[1]));
10503     ProductOps.push_back(vectorWrapScalar16(Ops[2]));
10504     auto *VTy = llvm::FixedVectorType::get(Int32Ty, 4);
10505     Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
10506                           ProductOps, "vqdmlXl");
10507     Constant *CI = ConstantInt::get(SizeTy, 0);
10508     Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
10509     Ops.pop_back();
10510 
10511     unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
10512                        BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
10513                           ? Intrinsic::aarch64_neon_sqadd
10514                           : Intrinsic::aarch64_neon_sqsub;
10515     return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
10516   }
10517   case NEON::BI__builtin_neon_vqdmlals_s32:
10518   case NEON::BI__builtin_neon_vqdmlsls_s32: {
10519     SmallVector<Value *, 2> ProductOps;
10520     ProductOps.push_back(Ops[1]);
10521     ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
10522     Ops[1] =
10523         EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
10524                      ProductOps, "vqdmlXl");
10525 
10526     unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
10527                                         ? Intrinsic::aarch64_neon_sqadd
10528                                         : Intrinsic::aarch64_neon_sqsub;
10529     return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
10530   }
10531   case NEON::BI__builtin_neon_vqdmlals_lane_s32:
10532   case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
10533   case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
10534   case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
10535     Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
10536                                           "lane");
10537     SmallVector<Value *, 2> ProductOps;
10538     ProductOps.push_back(Ops[1]);
10539     ProductOps.push_back(Ops[2]);
10540     Ops[1] =
10541         EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
10542                      ProductOps, "vqdmlXl");
10543     Ops.pop_back();
10544 
10545     unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
10546                        BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
10547                           ? Intrinsic::aarch64_neon_sqadd
10548                           : Intrinsic::aarch64_neon_sqsub;
10549     return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
10550   }
10551   case NEON::BI__builtin_neon_vget_lane_bf16:
10552   case NEON::BI__builtin_neon_vduph_lane_bf16:
10553   case NEON::BI__builtin_neon_vduph_lane_f16: {
10554     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10555                                         "vget_lane");
10556   }
10557   case NEON::BI__builtin_neon_vgetq_lane_bf16:
10558   case NEON::BI__builtin_neon_vduph_laneq_bf16:
10559   case NEON::BI__builtin_neon_vduph_laneq_f16: {
10560     return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
10561                                         "vgetq_lane");
10562   }
10563 
10564   case AArch64::BI_InterlockedAdd: {
10565     Value *Arg0 = EmitScalarExpr(E->getArg(0));
10566     Value *Arg1 = EmitScalarExpr(E->getArg(1));
10567     AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
10568       AtomicRMWInst::Add, Arg0, Arg1,
10569       llvm::AtomicOrdering::SequentiallyConsistent);
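    // atomicrmw returns the value that was in memory before the operation,
    // but _InterlockedAdd returns the post-add value, so add the operand
    // back in.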
10570     return Builder.CreateAdd(RMWI, Arg1);
10571   }
10572   }
10573 
10574   llvm::FixedVectorType *VTy = GetNeonType(this, Type);
10575   llvm::Type *Ty = VTy;
10576   if (!Ty)
10577     return nullptr;
10578 
10579   // Not all intrinsics handled by the common case work for AArch64 yet, so only
10580   // defer to common code if it's been added to our special map.
10581   Builtin = findARMVectorIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
10582                                         AArch64SIMDIntrinsicsProvenSorted);
10583 
10584   if (Builtin)
10585     return EmitCommonNeonBuiltinExpr(
10586         Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
10587         Builtin->NameHint, Builtin->TypeModifier, E, Ops,
10588         /*never use addresses*/ Address::invalid(), Address::invalid(), Arch);
10589 
10590   if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops, Arch))
10591     return V;
10592 
10593   unsigned Int;
10594   switch (BuiltinID) {
10595   default: return nullptr;
10596   case NEON::BI__builtin_neon_vbsl_v:
10597   case NEON::BI__builtin_neon_vbslq_v: {
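    // Bitwise select: (mask & a) | (~mask & b), computed on the integer view
    // of the vector operands.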
10598     llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
10599     Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
10600     Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
10601     Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
10602 
10603     Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
10604     Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
10605     Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
10606     return Builder.CreateBitCast(Ops[0], Ty);
10607   }
10608   case NEON::BI__builtin_neon_vfma_lane_v:
10609   case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
10610     // The ARM builtins (and instructions) have the addend as the first
10611     // operand, but the 'fma' intrinsics have it last. Swap it around here.
10612     Value *Addend = Ops[0];
10613     Value *Multiplicand = Ops[1];
10614     Value *LaneSource = Ops[2];
10615     Ops[0] = Multiplicand;
10616     Ops[1] = LaneSource;
10617     Ops[2] = Addend;
10618 
10619     // Now adjust things to handle the lane access.
10620     auto *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v
10621                          ? llvm::FixedVectorType::get(VTy->getElementType(),
10622                                                       VTy->getNumElements() / 2)
10623                          : VTy;
10624     llvm::Constant *cst = cast<Constant>(Ops[3]);
10625     Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(), cst);
10626     Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
10627     Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
10628 
10629     Ops.pop_back();
10630     Int = Builder.getIsFPConstrained() ? Intrinsic::experimental_constrained_fma
10631                                        : Intrinsic::fma;
10632     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
10633   }
10634   case NEON::BI__builtin_neon_vfma_laneq_v: {
10635     auto *VTy = cast<llvm::FixedVectorType>(Ty);
10636     // v1f64 fma should be mapped to Neon scalar f64 fma
10637     if (VTy && VTy->getElementType() == DoubleTy) {
10638       Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
10639       Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
10640       llvm::FixedVectorType *VTy =
10641           GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, true));
10642       Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
10643       Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10644       Value *Result;
10645       Result = emitCallMaybeConstrainedFPBuiltin(
10646           *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma,
10647           DoubleTy, {Ops[1], Ops[2], Ops[0]});
10648       return Builder.CreateBitCast(Result, Ty);
10649     }
10650     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10651     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10652 
10653     auto *STy = llvm::FixedVectorType::get(VTy->getElementType(),
10654                                            VTy->getNumElements() * 2);
10655     Ops[2] = Builder.CreateBitCast(Ops[2], STy);
10656     Value *SV = llvm::ConstantVector::getSplat(VTy->getElementCount(),
10657                                                cast<ConstantInt>(Ops[3]));
10658     Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
10659 
10660     return emitCallMaybeConstrainedFPBuiltin(
10661         *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10662         {Ops[2], Ops[1], Ops[0]});
10663   }
10664   case NEON::BI__builtin_neon_vfmaq_laneq_v: {
10665     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10666     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
10667 
10668     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
10669     Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
10670     return emitCallMaybeConstrainedFPBuiltin(
10671         *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10672         {Ops[2], Ops[1], Ops[0]});
10673   }
10674   case NEON::BI__builtin_neon_vfmah_lane_f16:
10675   case NEON::BI__builtin_neon_vfmas_lane_f32:
10676   case NEON::BI__builtin_neon_vfmah_laneq_f16:
10677   case NEON::BI__builtin_neon_vfmas_laneq_f32:
10678   case NEON::BI__builtin_neon_vfmad_lane_f64:
10679   case NEON::BI__builtin_neon_vfmad_laneq_f64: {
10680     Ops.push_back(EmitScalarExpr(E->getArg(3)));
10681     llvm::Type *Ty = ConvertType(E->getCallReturnType(getContext()));
10682     Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
10683     return emitCallMaybeConstrainedFPBuiltin(
10684         *this, Intrinsic::fma, Intrinsic::experimental_constrained_fma, Ty,
10685         {Ops[1], Ops[2], Ops[0]});
10686   }
10687   case NEON::BI__builtin_neon_vmull_v:
10688     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10689     Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
10690     if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
10691     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
10692   case NEON::BI__builtin_neon_vmax_v:
10693   case NEON::BI__builtin_neon_vmaxq_v:
10694     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10695     Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
10696     if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
10697     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
10698   case NEON::BI__builtin_neon_vmaxh_f16: {
10699     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10700     Int = Intrinsic::aarch64_neon_fmax;
10701     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmax");
10702   }
10703   case NEON::BI__builtin_neon_vmin_v:
10704   case NEON::BI__builtin_neon_vminq_v:
10705     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10706     Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
10707     if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
10708     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
10709   case NEON::BI__builtin_neon_vminh_f16: {
10710     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10711     Int = Intrinsic::aarch64_neon_fmin;
10712     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmin");
10713   }
10714   case NEON::BI__builtin_neon_vabd_v:
10715   case NEON::BI__builtin_neon_vabdq_v:
10716     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10717     Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
10718     if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
10719     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
10720   case NEON::BI__builtin_neon_vpadal_v:
10721   case NEON::BI__builtin_neon_vpadalq_v: {
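    // There is no direct vpadal intrinsic: emit a pairwise add-long
    // ([us]addlp) of the operand and add the accumulator to the result.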
10722     unsigned ArgElts = VTy->getNumElements();
10723     llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
10724     unsigned BitWidth = EltTy->getBitWidth();
10725     auto *ArgTy = llvm::FixedVectorType::get(
10726         llvm::IntegerType::get(getLLVMContext(), BitWidth / 2), 2 * ArgElts);
10727     llvm::Type* Tys[2] = { VTy, ArgTy };
10728     Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
10729     SmallVector<llvm::Value*, 1> TmpOps;
10730     TmpOps.push_back(Ops[1]);
10731     Function *F = CGM.getIntrinsic(Int, Tys);
10732     llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
10733     llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
10734     return Builder.CreateAdd(tmp, addend);
10735   }
10736   case NEON::BI__builtin_neon_vpmin_v:
10737   case NEON::BI__builtin_neon_vpminq_v:
10738     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10739     Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
10740     if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
10741     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
10742   case NEON::BI__builtin_neon_vpmax_v:
10743   case NEON::BI__builtin_neon_vpmaxq_v:
10744     // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
10745     Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
10746     if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
10747     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
10748   case NEON::BI__builtin_neon_vminnm_v:
10749   case NEON::BI__builtin_neon_vminnmq_v:
10750     Int = Intrinsic::aarch64_neon_fminnm;
10751     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
10752   case NEON::BI__builtin_neon_vminnmh_f16:
10753     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10754     Int = Intrinsic::aarch64_neon_fminnm;
10755     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vminnm");
10756   case NEON::BI__builtin_neon_vmaxnm_v:
10757   case NEON::BI__builtin_neon_vmaxnmq_v:
10758     Int = Intrinsic::aarch64_neon_fmaxnm;
10759     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
10760   case NEON::BI__builtin_neon_vmaxnmh_f16:
10761     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10762     Int = Intrinsic::aarch64_neon_fmaxnm;
10763     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmaxnm");
10764   case NEON::BI__builtin_neon_vrecpss_f32: {
10765     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10766     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, FloatTy),
10767                         Ops, "vrecps");
10768   }
10769   case NEON::BI__builtin_neon_vrecpsd_f64:
10770     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10771     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, DoubleTy),
10772                         Ops, "vrecps");
10773   case NEON::BI__builtin_neon_vrecpsh_f16:
10774     Ops.push_back(EmitScalarExpr(E->getArg(1)));
10775     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, HalfTy),
10776                         Ops, "vrecps");
10777   case NEON::BI__builtin_neon_vqshrun_n_v:
10778     Int = Intrinsic::aarch64_neon_sqshrun;
10779     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
10780   case NEON::BI__builtin_neon_vqrshrun_n_v:
10781     Int = Intrinsic::aarch64_neon_sqrshrun;
10782     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
10783   case NEON::BI__builtin_neon_vqshrn_n_v:
10784     Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
10785     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
10786   case NEON::BI__builtin_neon_vrshrn_n_v:
10787     Int = Intrinsic::aarch64_neon_rshrn;
10788     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
10789   case NEON::BI__builtin_neon_vqrshrn_n_v:
10790     Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
10791     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
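  // The vrnd* builtins below map onto the standard LLVM rounding intrinsics
  // (or their constrained variants when strict FP is in effect):
  // a -> round, i -> nearbyint, m -> floor, n -> roundeven, p -> ceil,
  // x -> rint, and plain vrnd -> trunc.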
10792   case NEON::BI__builtin_neon_vrndah_f16: {
10793     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10794     Int = Builder.getIsFPConstrained()
10795               ? Intrinsic::experimental_constrained_round
10796               : Intrinsic::round;
10797     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrnda");
10798   }
10799   case NEON::BI__builtin_neon_vrnda_v:
10800   case NEON::BI__builtin_neon_vrndaq_v: {
10801     Int = Builder.getIsFPConstrained()
10802               ? Intrinsic::experimental_constrained_round
10803               : Intrinsic::round;
10804     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
10805   }
10806   case NEON::BI__builtin_neon_vrndih_f16: {
10807     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10808     Int = Builder.getIsFPConstrained()
10809               ? Intrinsic::experimental_constrained_nearbyint
10810               : Intrinsic::nearbyint;
10811     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndi");
10812   }
10813   case NEON::BI__builtin_neon_vrndmh_f16: {
10814     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10815     Int = Builder.getIsFPConstrained()
10816               ? Intrinsic::experimental_constrained_floor
10817               : Intrinsic::floor;
10818     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndm");
10819   }
10820   case NEON::BI__builtin_neon_vrndm_v:
10821   case NEON::BI__builtin_neon_vrndmq_v: {
10822     Int = Builder.getIsFPConstrained()
10823               ? Intrinsic::experimental_constrained_floor
10824               : Intrinsic::floor;
10825     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
10826   }
10827   case NEON::BI__builtin_neon_vrndnh_f16: {
10828     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10829     Int = Builder.getIsFPConstrained()
10830               ? Intrinsic::experimental_constrained_roundeven
10831               : Intrinsic::roundeven;
10832     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
10833   }
10834   case NEON::BI__builtin_neon_vrndn_v:
10835   case NEON::BI__builtin_neon_vrndnq_v: {
10836     Int = Builder.getIsFPConstrained()
10837               ? Intrinsic::experimental_constrained_roundeven
10838               : Intrinsic::roundeven;
10839     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
10840   }
10841   case NEON::BI__builtin_neon_vrndns_f32: {
10842     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10843     Int = Builder.getIsFPConstrained()
10844               ? Intrinsic::experimental_constrained_roundeven
10845               : Intrinsic::roundeven;
10846     return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
10847   }
10848   case NEON::BI__builtin_neon_vrndph_f16: {
10849     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10850     Int = Builder.getIsFPConstrained()
10851               ? Intrinsic::experimental_constrained_ceil
10852               : Intrinsic::ceil;
10853     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndp");
10854   }
10855   case NEON::BI__builtin_neon_vrndp_v:
10856   case NEON::BI__builtin_neon_vrndpq_v: {
10857     Int = Builder.getIsFPConstrained()
10858               ? Intrinsic::experimental_constrained_ceil
10859               : Intrinsic::ceil;
10860     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
10861   }
10862   case NEON::BI__builtin_neon_vrndxh_f16: {
10863     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10864     Int = Builder.getIsFPConstrained()
10865               ? Intrinsic::experimental_constrained_rint
10866               : Intrinsic::rint;
10867     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndx");
10868   }
10869   case NEON::BI__builtin_neon_vrndx_v:
10870   case NEON::BI__builtin_neon_vrndxq_v: {
10871     Int = Builder.getIsFPConstrained()
10872               ? Intrinsic::experimental_constrained_rint
10873               : Intrinsic::rint;
10874     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
10875   }
10876   case NEON::BI__builtin_neon_vrndh_f16: {
10877     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10878     Int = Builder.getIsFPConstrained()
10879               ? Intrinsic::experimental_constrained_trunc
10880               : Intrinsic::trunc;
10881     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
10882   }
10883   case NEON::BI__builtin_neon_vrnd32x_v:
10884   case NEON::BI__builtin_neon_vrnd32xq_v: {
10885     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10886     Int = Intrinsic::aarch64_neon_frint32x;
10887     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
10888   }
10889   case NEON::BI__builtin_neon_vrnd32z_v:
10890   case NEON::BI__builtin_neon_vrnd32zq_v: {
10891     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10892     Int = Intrinsic::aarch64_neon_frint32z;
10893     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
10894   }
10895   case NEON::BI__builtin_neon_vrnd64x_v:
10896   case NEON::BI__builtin_neon_vrnd64xq_v: {
10897     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10898     Int = Intrinsic::aarch64_neon_frint64x;
10899     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
10900   }
10901   case NEON::BI__builtin_neon_vrnd64z_v:
10902   case NEON::BI__builtin_neon_vrnd64zq_v: {
10903     Ops.push_back(EmitScalarExpr(E->getArg(0)));
10904     Int = Intrinsic::aarch64_neon_frint64z;
10905     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
10906   }
10907   case NEON::BI__builtin_neon_vrnd_v:
10908   case NEON::BI__builtin_neon_vrndq_v: {
10909     Int = Builder.getIsFPConstrained()
10910               ? Intrinsic::experimental_constrained_trunc
10911               : Intrinsic::trunc;
10912     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
10913   }
10914   case NEON::BI__builtin_neon_vcvt_f64_v:
10915   case NEON::BI__builtin_neon_vcvtq_f64_v:
10916     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
10917     Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
10918     return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
10919                 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
10920   case NEON::BI__builtin_neon_vcvt_f64_f32: {
10921     assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
10922            "unexpected vcvt_f64_f32 builtin");
10923     NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
10924     Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10925 
10926     return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
10927   }
10928   case NEON::BI__builtin_neon_vcvt_f32_f64: {
10929     assert(Type.getEltType() == NeonTypeFlags::Float32 &&
10930            "unexpected vcvt_f32_f64 builtin");
10931     NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
10932     Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
10933 
10934     return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
10935   }
10936   case NEON::BI__builtin_neon_vcvt_s32_v:
10937   case NEON::BI__builtin_neon_vcvt_u32_v:
10938   case NEON::BI__builtin_neon_vcvt_s64_v:
10939   case NEON::BI__builtin_neon_vcvt_u64_v:
10940   case NEON::BI__builtin_neon_vcvt_s16_v:
10941   case NEON::BI__builtin_neon_vcvt_u16_v:
10942   case NEON::BI__builtin_neon_vcvtq_s32_v:
10943   case NEON::BI__builtin_neon_vcvtq_u32_v:
10944   case NEON::BI__builtin_neon_vcvtq_s64_v:
10945   case NEON::BI__builtin_neon_vcvtq_u64_v:
10946   case NEON::BI__builtin_neon_vcvtq_s16_v:
10947   case NEON::BI__builtin_neon_vcvtq_u16_v: {
10948     Int =
10949         usgn ? Intrinsic::aarch64_neon_fcvtzu : Intrinsic::aarch64_neon_fcvtzs;
10950     llvm::Type *Tys[2] = {Ty, GetFloatNeonType(this, Type)};
10951     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtz");
10952   }
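  // The vcvt{a,m,n,p} families below are float-to-int conversions with an
  // explicit rounding mode: 'a' rounds to nearest (ties away from zero),
  // 'm' toward -infinity, 'n' to nearest (ties to even), and 'p' toward
  // +infinity; the plain vcvt handled above rounds toward zero (fcvtz).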
10953   case NEON::BI__builtin_neon_vcvta_s16_v:
10954   case NEON::BI__builtin_neon_vcvta_u16_v:
10955   case NEON::BI__builtin_neon_vcvta_s32_v:
10956   case NEON::BI__builtin_neon_vcvtaq_s16_v:
10957   case NEON::BI__builtin_neon_vcvtaq_s32_v:
10958   case NEON::BI__builtin_neon_vcvta_u32_v:
10959   case NEON::BI__builtin_neon_vcvtaq_u16_v:
10960   case NEON::BI__builtin_neon_vcvtaq_u32_v:
10961   case NEON::BI__builtin_neon_vcvta_s64_v:
10962   case NEON::BI__builtin_neon_vcvtaq_s64_v:
10963   case NEON::BI__builtin_neon_vcvta_u64_v:
10964   case NEON::BI__builtin_neon_vcvtaq_u64_v: {
10965     Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
10966     llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10967     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
10968   }
10969   case NEON::BI__builtin_neon_vcvtm_s16_v:
10970   case NEON::BI__builtin_neon_vcvtm_s32_v:
10971   case NEON::BI__builtin_neon_vcvtmq_s16_v:
10972   case NEON::BI__builtin_neon_vcvtmq_s32_v:
10973   case NEON::BI__builtin_neon_vcvtm_u16_v:
10974   case NEON::BI__builtin_neon_vcvtm_u32_v:
10975   case NEON::BI__builtin_neon_vcvtmq_u16_v:
10976   case NEON::BI__builtin_neon_vcvtmq_u32_v:
10977   case NEON::BI__builtin_neon_vcvtm_s64_v:
10978   case NEON::BI__builtin_neon_vcvtmq_s64_v:
10979   case NEON::BI__builtin_neon_vcvtm_u64_v:
10980   case NEON::BI__builtin_neon_vcvtmq_u64_v: {
10981     Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
10982     llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10983     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
10984   }
10985   case NEON::BI__builtin_neon_vcvtn_s16_v:
10986   case NEON::BI__builtin_neon_vcvtn_s32_v:
10987   case NEON::BI__builtin_neon_vcvtnq_s16_v:
10988   case NEON::BI__builtin_neon_vcvtnq_s32_v:
10989   case NEON::BI__builtin_neon_vcvtn_u16_v:
10990   case NEON::BI__builtin_neon_vcvtn_u32_v:
10991   case NEON::BI__builtin_neon_vcvtnq_u16_v:
10992   case NEON::BI__builtin_neon_vcvtnq_u32_v:
10993   case NEON::BI__builtin_neon_vcvtn_s64_v:
10994   case NEON::BI__builtin_neon_vcvtnq_s64_v:
10995   case NEON::BI__builtin_neon_vcvtn_u64_v:
10996   case NEON::BI__builtin_neon_vcvtnq_u64_v: {
10997     Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
10998     llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
10999     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
11000   }
11001   case NEON::BI__builtin_neon_vcvtp_s16_v:
11002   case NEON::BI__builtin_neon_vcvtp_s32_v:
11003   case NEON::BI__builtin_neon_vcvtpq_s16_v:
11004   case NEON::BI__builtin_neon_vcvtpq_s32_v:
11005   case NEON::BI__builtin_neon_vcvtp_u16_v:
11006   case NEON::BI__builtin_neon_vcvtp_u32_v:
11007   case NEON::BI__builtin_neon_vcvtpq_u16_v:
11008   case NEON::BI__builtin_neon_vcvtpq_u32_v:
11009   case NEON::BI__builtin_neon_vcvtp_s64_v:
11010   case NEON::BI__builtin_neon_vcvtpq_s64_v:
11011   case NEON::BI__builtin_neon_vcvtp_u64_v:
11012   case NEON::BI__builtin_neon_vcvtpq_u64_v: {
11013     Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
11014     llvm::Type *Tys[2] = { Ty, GetFloatNeonType(this, Type) };
11015     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
11016   }
11017   case NEON::BI__builtin_neon_vmulx_v:
11018   case NEON::BI__builtin_neon_vmulxq_v: {
11019     Int = Intrinsic::aarch64_neon_fmulx;
11020     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
11021   }
11022   case NEON::BI__builtin_neon_vmulxh_lane_f16:
11023   case NEON::BI__builtin_neon_vmulxh_laneq_f16: {
11024     // vmulx_lane should be mapped to Neon scalar mulx after
11025     // extracting the scalar element
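    // E.g. (illustrative) vmulxh_lane_f16(a, v, 3) extracts lane 3 of v and
    // then calls the half-precision scalar fmulx intrinsic on {a, v[3]}.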
11026     Ops.push_back(EmitScalarExpr(E->getArg(2)));
11027     Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
11028     Ops.pop_back();
11029     Int = Intrinsic::aarch64_neon_fmulx;
11030     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vmulx");
11031   }
11032   case NEON::BI__builtin_neon_vmul_lane_v:
11033   case NEON::BI__builtin_neon_vmul_laneq_v: {
11034     // v1f64 vmul_lane should be mapped to Neon scalar mul lane
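    // E.g. (illustrative) vmul_lane_f64(a, v, 0): bitcast a to double,
    // extract lane 0 of v, fmul the two scalars, and bitcast the result
    // back to <1 x double>.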
    bool Quad = BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v;
11038     Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11039     llvm::FixedVectorType *VTy =
11040         GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
11041     Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
11042     Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
11043     Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
11044     return Builder.CreateBitCast(Result, Ty);
11045   }
11046   case NEON::BI__builtin_neon_vnegd_s64:
11047     return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
11048   case NEON::BI__builtin_neon_vnegh_f16:
11049     return Builder.CreateFNeg(EmitScalarExpr(E->getArg(0)), "vnegh");
11050   case NEON::BI__builtin_neon_vpmaxnm_v:
11051   case NEON::BI__builtin_neon_vpmaxnmq_v: {
11052     Int = Intrinsic::aarch64_neon_fmaxnmp;
11053     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
11054   }
11055   case NEON::BI__builtin_neon_vpminnm_v:
11056   case NEON::BI__builtin_neon_vpminnmq_v: {
11057     Int = Intrinsic::aarch64_neon_fminnmp;
11058     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
11059   }
11060   case NEON::BI__builtin_neon_vsqrth_f16: {
11061     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11062     Int = Builder.getIsFPConstrained()
11063               ? Intrinsic::experimental_constrained_sqrt
11064               : Intrinsic::sqrt;
11065     return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vsqrt");
11066   }
11067   case NEON::BI__builtin_neon_vsqrt_v:
11068   case NEON::BI__builtin_neon_vsqrtq_v: {
11069     Int = Builder.getIsFPConstrained()
11070               ? Intrinsic::experimental_constrained_sqrt
11071               : Intrinsic::sqrt;
11072     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11073     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
11074   }
11075   case NEON::BI__builtin_neon_vrbit_v:
11076   case NEON::BI__builtin_neon_vrbitq_v: {
11077     Int = Intrinsic::bitreverse;
11078     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
11079   }
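  // The across-lanes reductions below (vaddv/vmaxv/vminv and friends) all
  // share one shape: call the i32-returning across-vector intrinsic, then
  // truncate to the element width. Roughly (illustrative IR):
  //   %r = call i32 @llvm.aarch64.neon.uaddv.i32.v8i8(<8 x i8> %v)
  //   %t = trunc i32 %r to i8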
11080   case NEON::BI__builtin_neon_vaddv_u8:
11081     // FIXME: These are handled by the AArch64 scalar code.
11082     usgn = true;
11083     LLVM_FALLTHROUGH;
11084   case NEON::BI__builtin_neon_vaddv_s8: {
11085     Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11086     Ty = Int32Ty;
11087     VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11088     llvm::Type *Tys[2] = { Ty, VTy };
11089     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11090     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11091     return Builder.CreateTrunc(Ops[0], Int8Ty);
11092   }
11093   case NEON::BI__builtin_neon_vaddv_u16:
11094     usgn = true;
11095     LLVM_FALLTHROUGH;
11096   case NEON::BI__builtin_neon_vaddv_s16: {
11097     Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11098     Ty = Int32Ty;
11099     VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11100     llvm::Type *Tys[2] = { Ty, VTy };
11101     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11102     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11103     return Builder.CreateTrunc(Ops[0], Int16Ty);
11104   }
11105   case NEON::BI__builtin_neon_vaddvq_u8:
11106     usgn = true;
11107     LLVM_FALLTHROUGH;
11108   case NEON::BI__builtin_neon_vaddvq_s8: {
11109     Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11110     Ty = Int32Ty;
11111     VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11112     llvm::Type *Tys[2] = { Ty, VTy };
11113     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11114     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11115     return Builder.CreateTrunc(Ops[0], Int8Ty);
11116   }
11117   case NEON::BI__builtin_neon_vaddvq_u16:
11118     usgn = true;
11119     LLVM_FALLTHROUGH;
11120   case NEON::BI__builtin_neon_vaddvq_s16: {
11121     Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
11122     Ty = Int32Ty;
11123     VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11124     llvm::Type *Tys[2] = { Ty, VTy };
11125     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11126     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
11127     return Builder.CreateTrunc(Ops[0], Int16Ty);
11128   }
11129   case NEON::BI__builtin_neon_vmaxv_u8: {
11130     Int = Intrinsic::aarch64_neon_umaxv;
11131     Ty = Int32Ty;
11132     VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11133     llvm::Type *Tys[2] = { Ty, VTy };
11134     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11135     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11136     return Builder.CreateTrunc(Ops[0], Int8Ty);
11137   }
11138   case NEON::BI__builtin_neon_vmaxv_u16: {
11139     Int = Intrinsic::aarch64_neon_umaxv;
11140     Ty = Int32Ty;
11141     VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11142     llvm::Type *Tys[2] = { Ty, VTy };
11143     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11144     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11145     return Builder.CreateTrunc(Ops[0], Int16Ty);
11146   }
11147   case NEON::BI__builtin_neon_vmaxvq_u8: {
11148     Int = Intrinsic::aarch64_neon_umaxv;
11149     Ty = Int32Ty;
11150     VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11151     llvm::Type *Tys[2] = { Ty, VTy };
11152     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11153     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11154     return Builder.CreateTrunc(Ops[0], Int8Ty);
11155   }
11156   case NEON::BI__builtin_neon_vmaxvq_u16: {
11157     Int = Intrinsic::aarch64_neon_umaxv;
11158     Ty = Int32Ty;
11159     VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11160     llvm::Type *Tys[2] = { Ty, VTy };
11161     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11162     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11163     return Builder.CreateTrunc(Ops[0], Int16Ty);
11164   }
11165   case NEON::BI__builtin_neon_vmaxv_s8: {
11166     Int = Intrinsic::aarch64_neon_smaxv;
11167     Ty = Int32Ty;
11168     VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11169     llvm::Type *Tys[2] = { Ty, VTy };
11170     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11171     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11172     return Builder.CreateTrunc(Ops[0], Int8Ty);
11173   }
11174   case NEON::BI__builtin_neon_vmaxv_s16: {
11175     Int = Intrinsic::aarch64_neon_smaxv;
11176     Ty = Int32Ty;
11177     VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11178     llvm::Type *Tys[2] = { Ty, VTy };
11179     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11180     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11181     return Builder.CreateTrunc(Ops[0], Int16Ty);
11182   }
11183   case NEON::BI__builtin_neon_vmaxvq_s8: {
11184     Int = Intrinsic::aarch64_neon_smaxv;
11185     Ty = Int32Ty;
11186     VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11187     llvm::Type *Tys[2] = { Ty, VTy };
11188     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11189     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11190     return Builder.CreateTrunc(Ops[0], Int8Ty);
11191   }
11192   case NEON::BI__builtin_neon_vmaxvq_s16: {
11193     Int = Intrinsic::aarch64_neon_smaxv;
11194     Ty = Int32Ty;
11195     VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11196     llvm::Type *Tys[2] = { Ty, VTy };
11197     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11198     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11199     return Builder.CreateTrunc(Ops[0], Int16Ty);
11200   }
11201   case NEON::BI__builtin_neon_vmaxv_f16: {
11202     Int = Intrinsic::aarch64_neon_fmaxv;
11203     Ty = HalfTy;
11204     VTy = llvm::FixedVectorType::get(HalfTy, 4);
11205     llvm::Type *Tys[2] = { Ty, VTy };
11206     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11207     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11208     return Builder.CreateTrunc(Ops[0], HalfTy);
11209   }
11210   case NEON::BI__builtin_neon_vmaxvq_f16: {
11211     Int = Intrinsic::aarch64_neon_fmaxv;
11212     Ty = HalfTy;
11213     VTy = llvm::FixedVectorType::get(HalfTy, 8);
11214     llvm::Type *Tys[2] = { Ty, VTy };
11215     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11216     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
11217     return Builder.CreateTrunc(Ops[0], HalfTy);
11218   }
11219   case NEON::BI__builtin_neon_vminv_u8: {
11220     Int = Intrinsic::aarch64_neon_uminv;
11221     Ty = Int32Ty;
11222     VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11223     llvm::Type *Tys[2] = { Ty, VTy };
11224     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11225     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11226     return Builder.CreateTrunc(Ops[0], Int8Ty);
11227   }
11228   case NEON::BI__builtin_neon_vminv_u16: {
11229     Int = Intrinsic::aarch64_neon_uminv;
11230     Ty = Int32Ty;
11231     VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11232     llvm::Type *Tys[2] = { Ty, VTy };
11233     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11234     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11235     return Builder.CreateTrunc(Ops[0], Int16Ty);
11236   }
11237   case NEON::BI__builtin_neon_vminvq_u8: {
11238     Int = Intrinsic::aarch64_neon_uminv;
11239     Ty = Int32Ty;
11240     VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11241     llvm::Type *Tys[2] = { Ty, VTy };
11242     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11243     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11244     return Builder.CreateTrunc(Ops[0], Int8Ty);
11245   }
11246   case NEON::BI__builtin_neon_vminvq_u16: {
11247     Int = Intrinsic::aarch64_neon_uminv;
11248     Ty = Int32Ty;
11249     VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11250     llvm::Type *Tys[2] = { Ty, VTy };
11251     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11252     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11253     return Builder.CreateTrunc(Ops[0], Int16Ty);
11254   }
11255   case NEON::BI__builtin_neon_vminv_s8: {
11256     Int = Intrinsic::aarch64_neon_sminv;
11257     Ty = Int32Ty;
11258     VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11259     llvm::Type *Tys[2] = { Ty, VTy };
11260     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11261     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11262     return Builder.CreateTrunc(Ops[0], Int8Ty);
11263   }
11264   case NEON::BI__builtin_neon_vminv_s16: {
11265     Int = Intrinsic::aarch64_neon_sminv;
11266     Ty = Int32Ty;
11267     VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11268     llvm::Type *Tys[2] = { Ty, VTy };
11269     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11270     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11271     return Builder.CreateTrunc(Ops[0], Int16Ty);
11272   }
11273   case NEON::BI__builtin_neon_vminvq_s8: {
11274     Int = Intrinsic::aarch64_neon_sminv;
11275     Ty = Int32Ty;
11276     VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11277     llvm::Type *Tys[2] = { Ty, VTy };
11278     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11279     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11280     return Builder.CreateTrunc(Ops[0], Int8Ty);
11281   }
11282   case NEON::BI__builtin_neon_vminvq_s16: {
11283     Int = Intrinsic::aarch64_neon_sminv;
11284     Ty = Int32Ty;
11285     VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11286     llvm::Type *Tys[2] = { Ty, VTy };
11287     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11288     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11289     return Builder.CreateTrunc(Ops[0], Int16Ty);
11290   }
11291   case NEON::BI__builtin_neon_vminv_f16: {
11292     Int = Intrinsic::aarch64_neon_fminv;
11293     Ty = HalfTy;
11294     VTy = llvm::FixedVectorType::get(HalfTy, 4);
11295     llvm::Type *Tys[2] = { Ty, VTy };
11296     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11297     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11298     return Builder.CreateTrunc(Ops[0], HalfTy);
11299   }
11300   case NEON::BI__builtin_neon_vminvq_f16: {
11301     Int = Intrinsic::aarch64_neon_fminv;
11302     Ty = HalfTy;
11303     VTy = llvm::FixedVectorType::get(HalfTy, 8);
11304     llvm::Type *Tys[2] = { Ty, VTy };
11305     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11306     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
11307     return Builder.CreateTrunc(Ops[0], HalfTy);
11308   }
11309   case NEON::BI__builtin_neon_vmaxnmv_f16: {
11310     Int = Intrinsic::aarch64_neon_fmaxnmv;
11311     Ty = HalfTy;
11312     VTy = llvm::FixedVectorType::get(HalfTy, 4);
11313     llvm::Type *Tys[2] = { Ty, VTy };
11314     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11315     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
11316     return Builder.CreateTrunc(Ops[0], HalfTy);
11317   }
11318   case NEON::BI__builtin_neon_vmaxnmvq_f16: {
11319     Int = Intrinsic::aarch64_neon_fmaxnmv;
11320     Ty = HalfTy;
11321     VTy = llvm::FixedVectorType::get(HalfTy, 8);
11322     llvm::Type *Tys[2] = { Ty, VTy };
11323     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11324     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxnmv");
11325     return Builder.CreateTrunc(Ops[0], HalfTy);
11326   }
11327   case NEON::BI__builtin_neon_vminnmv_f16: {
11328     Int = Intrinsic::aarch64_neon_fminnmv;
11329     Ty = HalfTy;
11330     VTy = llvm::FixedVectorType::get(HalfTy, 4);
11331     llvm::Type *Tys[2] = { Ty, VTy };
11332     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11333     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
11334     return Builder.CreateTrunc(Ops[0], HalfTy);
11335   }
11336   case NEON::BI__builtin_neon_vminnmvq_f16: {
11337     Int = Intrinsic::aarch64_neon_fminnmv;
11338     Ty = HalfTy;
11339     VTy = llvm::FixedVectorType::get(HalfTy, 8);
11340     llvm::Type *Tys[2] = { Ty, VTy };
11341     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11342     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminnmv");
11343     return Builder.CreateTrunc(Ops[0], HalfTy);
11344   }
11345   case NEON::BI__builtin_neon_vmul_n_f64: {
11346     Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
11347     Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
11348     return Builder.CreateFMul(Ops[0], RHS);
11349   }
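  // The vaddlv* builtins below are widening across-lanes adds; e.g.
  // vaddlv_u8 sums eight u8 lanes into a u16. The intrinsic is declared to
  // return i32, so narrower results are truncated afterwards.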
11350   case NEON::BI__builtin_neon_vaddlv_u8: {
11351     Int = Intrinsic::aarch64_neon_uaddlv;
11352     Ty = Int32Ty;
11353     VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11354     llvm::Type *Tys[2] = { Ty, VTy };
11355     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11356     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11357     return Builder.CreateTrunc(Ops[0], Int16Ty);
11358   }
11359   case NEON::BI__builtin_neon_vaddlv_u16: {
11360     Int = Intrinsic::aarch64_neon_uaddlv;
11361     Ty = Int32Ty;
11362     VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11363     llvm::Type *Tys[2] = { Ty, VTy };
11364     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11365     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11366   }
11367   case NEON::BI__builtin_neon_vaddlvq_u8: {
11368     Int = Intrinsic::aarch64_neon_uaddlv;
11369     Ty = Int32Ty;
11370     VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11371     llvm::Type *Tys[2] = { Ty, VTy };
11372     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11373     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11374     return Builder.CreateTrunc(Ops[0], Int16Ty);
11375   }
11376   case NEON::BI__builtin_neon_vaddlvq_u16: {
11377     Int = Intrinsic::aarch64_neon_uaddlv;
11378     Ty = Int32Ty;
11379     VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11380     llvm::Type *Tys[2] = { Ty, VTy };
11381     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11382     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11383   }
11384   case NEON::BI__builtin_neon_vaddlv_s8: {
11385     Int = Intrinsic::aarch64_neon_saddlv;
11386     Ty = Int32Ty;
11387     VTy = llvm::FixedVectorType::get(Int8Ty, 8);
11388     llvm::Type *Tys[2] = { Ty, VTy };
11389     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11390     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11391     return Builder.CreateTrunc(Ops[0], Int16Ty);
11392   }
11393   case NEON::BI__builtin_neon_vaddlv_s16: {
11394     Int = Intrinsic::aarch64_neon_saddlv;
11395     Ty = Int32Ty;
11396     VTy = llvm::FixedVectorType::get(Int16Ty, 4);
11397     llvm::Type *Tys[2] = { Ty, VTy };
11398     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11399     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11400   }
11401   case NEON::BI__builtin_neon_vaddlvq_s8: {
11402     Int = Intrinsic::aarch64_neon_saddlv;
11403     Ty = Int32Ty;
11404     VTy = llvm::FixedVectorType::get(Int8Ty, 16);
11405     llvm::Type *Tys[2] = { Ty, VTy };
11406     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11407     Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11408     return Builder.CreateTrunc(Ops[0], Int16Ty);
11409   }
11410   case NEON::BI__builtin_neon_vaddlvq_s16: {
11411     Int = Intrinsic::aarch64_neon_saddlv;
11412     Ty = Int32Ty;
11413     VTy = llvm::FixedVectorType::get(Int16Ty, 8);
11414     llvm::Type *Tys[2] = { Ty, VTy };
11415     Ops.push_back(EmitScalarExpr(E->getArg(0)));
11416     return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
11417   }
11418   case NEON::BI__builtin_neon_vsri_n_v:
11419   case NEON::BI__builtin_neon_vsriq_n_v: {
11420     Int = Intrinsic::aarch64_neon_vsri;
11421     llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
11422     return EmitNeonCall(Intrin, Ops, "vsri_n");
11423   }
11424   case NEON::BI__builtin_neon_vsli_n_v:
11425   case NEON::BI__builtin_neon_vsliq_n_v: {
11426     Int = Intrinsic::aarch64_neon_vsli;
11427     llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
11428     return EmitNeonCall(Intrin, Ops, "vsli_n");
11429   }
11430   case NEON::BI__builtin_neon_vsra_n_v:
11431   case NEON::BI__builtin_neon_vsraq_n_v:
11432     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11433     Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
11434     return Builder.CreateAdd(Ops[0], Ops[1]);
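  // vrsra_n is a rounding shift-right-accumulate: the addend is computed
  // with the [su]rshl rounding-shift intrinsic (the trailing EmitNeonCall
  // arguments request that the immediate be treated as a right shift), then
  // added to the accumulator.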
11435   case NEON::BI__builtin_neon_vrsra_n_v:
11436   case NEON::BI__builtin_neon_vrsraq_n_v: {
11437     Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
11438     SmallVector<llvm::Value*,2> TmpOps;
11439     TmpOps.push_back(Ops[1]);
11440     TmpOps.push_back(Ops[2]);
11441     Function* F = CGM.getIntrinsic(Int, Ty);
11442     llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
11443     Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
11444     return Builder.CreateAdd(Ops[0], tmp);
11445   }
11446   case NEON::BI__builtin_neon_vld1_v:
11447   case NEON::BI__builtin_neon_vld1q_v: {
11448     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
11449     return Builder.CreateAlignedLoad(VTy, Ops[0], PtrOp0.getAlignment());
11450   }
11451   case NEON::BI__builtin_neon_vst1_v:
11452   case NEON::BI__builtin_neon_vst1q_v:
11453     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
11454     Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
11455     return Builder.CreateAlignedStore(Ops[1], Ops[0], PtrOp0.getAlignment());
11456   case NEON::BI__builtin_neon_vld1_lane_v:
11457   case NEON::BI__builtin_neon_vld1q_lane_v: {
11458     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11459     Ty = llvm::PointerType::getUnqual(VTy->getElementType());
11460     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11461     Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
11462                                        PtrOp0.getAlignment());
11463     return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
11464   }
11465   case NEON::BI__builtin_neon_vld1_dup_v:
11466   case NEON::BI__builtin_neon_vld1q_dup_v: {
11467     Value *V = UndefValue::get(Ty);
11468     Ty = llvm::PointerType::getUnqual(VTy->getElementType());
11469     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11470     Ops[0] = Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0],
11471                                        PtrOp0.getAlignment());
11472     llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
11473     Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
11474     return EmitNeonSplat(Ops[0], CI);
11475   }
11476   case NEON::BI__builtin_neon_vst1_lane_v:
11477   case NEON::BI__builtin_neon_vst1q_lane_v:
11478     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11479     Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
11480     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11481     return Builder.CreateAlignedStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty),
11482                                       PtrOp0.getAlignment());
11483   case NEON::BI__builtin_neon_vld2_v:
11484   case NEON::BI__builtin_neon_vld2q_v: {
11485     llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11486     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11487     llvm::Type *Tys[2] = { VTy, PTy };
11488     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
11489     Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
11490     Ops[0] = Builder.CreateBitCast(Ops[0],
11491                 llvm::PointerType::getUnqual(Ops[1]->getType()));
11492     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11493   }
11494   case NEON::BI__builtin_neon_vld3_v:
11495   case NEON::BI__builtin_neon_vld3q_v: {
11496     llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11497     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11498     llvm::Type *Tys[2] = { VTy, PTy };
11499     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
11500     Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
11501     Ops[0] = Builder.CreateBitCast(Ops[0],
11502                 llvm::PointerType::getUnqual(Ops[1]->getType()));
11503     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11504   }
11505   case NEON::BI__builtin_neon_vld4_v:
11506   case NEON::BI__builtin_neon_vld4q_v: {
11507     llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
11508     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11509     llvm::Type *Tys[2] = { VTy, PTy };
11510     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
11511     Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
11512     Ops[0] = Builder.CreateBitCast(Ops[0],
11513                 llvm::PointerType::getUnqual(Ops[1]->getType()));
11514     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11515   }
11516   case NEON::BI__builtin_neon_vld2_dup_v:
11517   case NEON::BI__builtin_neon_vld2q_dup_v: {
11518     llvm::Type *PTy =
11519       llvm::PointerType::getUnqual(VTy->getElementType());
11520     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11521     llvm::Type *Tys[2] = { VTy, PTy };
11522     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
11523     Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
11524     Ops[0] = Builder.CreateBitCast(Ops[0],
11525                 llvm::PointerType::getUnqual(Ops[1]->getType()));
11526     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11527   }
11528   case NEON::BI__builtin_neon_vld3_dup_v:
11529   case NEON::BI__builtin_neon_vld3q_dup_v: {
11530     llvm::Type *PTy =
11531       llvm::PointerType::getUnqual(VTy->getElementType());
11532     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11533     llvm::Type *Tys[2] = { VTy, PTy };
11534     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
11535     Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
11536     Ops[0] = Builder.CreateBitCast(Ops[0],
11537                 llvm::PointerType::getUnqual(Ops[1]->getType()));
11538     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11539   }
11540   case NEON::BI__builtin_neon_vld4_dup_v:
11541   case NEON::BI__builtin_neon_vld4q_dup_v: {
11542     llvm::Type *PTy =
11543       llvm::PointerType::getUnqual(VTy->getElementType());
11544     Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
11545     llvm::Type *Tys[2] = { VTy, PTy };
11546     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
11547     Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
11548     Ops[0] = Builder.CreateBitCast(Ops[0],
11549                 llvm::PointerType::getUnqual(Ops[1]->getType()));
11550     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11551   }
11552   case NEON::BI__builtin_neon_vld2_lane_v:
11553   case NEON::BI__builtin_neon_vld2q_lane_v: {
11554     llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11555     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
11556     std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11557     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11558     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11559     Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
11560     Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
11561     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11562     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11563     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11564   }
11565   case NEON::BI__builtin_neon_vld3_lane_v:
11566   case NEON::BI__builtin_neon_vld3q_lane_v: {
11567     llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11568     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
11569     std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11570     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11571     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11572     Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
11573     Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
11574     Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
11575     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11576     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11577     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11578   }
11579   case NEON::BI__builtin_neon_vld4_lane_v:
11580   case NEON::BI__builtin_neon_vld4q_lane_v: {
11581     llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
11582     Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
11583     std::rotate(Ops.begin() + 1, Ops.begin() + 2, Ops.end());
11584     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11585     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11586     Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
11587     Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
11588     Ops[5] = Builder.CreateZExt(Ops[5], Int64Ty);
11589     Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
11590     Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
11591     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
11592     return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
11593   }
11594   case NEON::BI__builtin_neon_vst2_v:
11595   case NEON::BI__builtin_neon_vst2q_v: {
11596     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11597     llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
11598     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
11599                         Ops, "");
11600   }
11601   case NEON::BI__builtin_neon_vst2_lane_v:
11602   case NEON::BI__builtin_neon_vst2q_lane_v: {
11603     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11604     Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
11605     llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
11606     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
11607                         Ops, "");
11608   }
11609   case NEON::BI__builtin_neon_vst3_v:
11610   case NEON::BI__builtin_neon_vst3q_v: {
11611     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11612     llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
11613     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
11614                         Ops, "");
11615   }
11616   case NEON::BI__builtin_neon_vst3_lane_v:
11617   case NEON::BI__builtin_neon_vst3q_lane_v: {
11618     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11619     Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
11620     llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
11621     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
11622                         Ops, "");
11623   }
11624   case NEON::BI__builtin_neon_vst4_v:
11625   case NEON::BI__builtin_neon_vst4q_v: {
11626     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11627     llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
11628     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
11629                         Ops, "");
11630   }
11631   case NEON::BI__builtin_neon_vst4_lane_v:
11632   case NEON::BI__builtin_neon_vst4q_lane_v: {
11633     std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());
11634     Ops[4] = Builder.CreateZExt(Ops[4], Int64Ty);
11635     llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
11636     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
11637                         Ops, "");
11638   }
11639   case NEON::BI__builtin_neon_vtrn_v:
11640   case NEON::BI__builtin_neon_vtrnq_v: {
11641     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11642     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11643     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11644     Value *SV = nullptr;
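    // vtrn interleaves the even lanes (vi == 0) and then the odd lanes
    // (vi == 1) of the two inputs; e.g. for 4-element vectors the shuffle
    // masks are {0,4,2,6} and {1,5,3,7}.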
11645 
11646     for (unsigned vi = 0; vi != 2; ++vi) {
11647       SmallVector<int, 16> Indices;
11648       for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11649         Indices.push_back(i+vi);
11650         Indices.push_back(i+e+vi);
11651       }
11652       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11653       SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vtrn");
11654       SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11655     }
11656     return SV;
11657   }
11658   case NEON::BI__builtin_neon_vuzp_v:
11659   case NEON::BI__builtin_neon_vuzpq_v: {
11660     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11661     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11662     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11663     Value *SV = nullptr;
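    // vuzp de-interleaves: it gathers the even lanes and then the odd lanes
    // of the concatenated inputs; e.g. for 4-element vectors the shuffle
    // masks are {0,2,4,6} and {1,3,5,7}.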
11664 
11665     for (unsigned vi = 0; vi != 2; ++vi) {
11666       SmallVector<int, 16> Indices;
11667       for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
11668         Indices.push_back(2*i+vi);
11669 
11670       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11671       SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vuzp");
11672       SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11673     }
11674     return SV;
11675   }
11676   case NEON::BI__builtin_neon_vzip_v:
11677   case NEON::BI__builtin_neon_vzipq_v: {
11678     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11679     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
11680     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
11681     Value *SV = nullptr;
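    // vzip interleaves the low halves and then the high halves of the two
    // inputs; e.g. for 4-element vectors the shuffle masks are {0,4,1,5}
    // and {2,6,3,7}.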
11682 
11683     for (unsigned vi = 0; vi != 2; ++vi) {
11684       SmallVector<int, 16> Indices;
11685       for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
11686         Indices.push_back((i + vi*e) >> 1);
11687         Indices.push_back(((i + vi*e) >> 1)+e);
11688       }
11689       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ty, Ops[0], vi);
11690       SV = Builder.CreateShuffleVector(Ops[1], Ops[2], Indices, "vzip");
11691       SV = Builder.CreateDefaultAlignedStore(SV, Addr);
11692     }
11693     return SV;
11694   }
11695   case NEON::BI__builtin_neon_vqtbl1q_v: {
11696     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
11697                         Ops, "vtbl1");
11698   }
11699   case NEON::BI__builtin_neon_vqtbl2q_v: {
11700     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
11701                         Ops, "vtbl2");
11702   }
11703   case NEON::BI__builtin_neon_vqtbl3q_v: {
11704     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
11705                         Ops, "vtbl3");
11706   }
11707   case NEON::BI__builtin_neon_vqtbl4q_v: {
11708     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
11709                         Ops, "vtbl4");
11710   }
11711   case NEON::BI__builtin_neon_vqtbx1q_v: {
11712     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
11713                         Ops, "vtbx1");
11714   }
11715   case NEON::BI__builtin_neon_vqtbx2q_v: {
11716     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
11717                         Ops, "vtbx2");
11718   }
11719   case NEON::BI__builtin_neon_vqtbx3q_v: {
11720     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
11721                         Ops, "vtbx3");
11722   }
11723   case NEON::BI__builtin_neon_vqtbx4q_v: {
11724     return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
11725                         Ops, "vtbx4");
11726   }
11727   case NEON::BI__builtin_neon_vsqadd_v:
11728   case NEON::BI__builtin_neon_vsqaddq_v: {
11729     Int = Intrinsic::aarch64_neon_usqadd;
11730     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
11731   }
11732   case NEON::BI__builtin_neon_vuqadd_v:
11733   case NEON::BI__builtin_neon_vuqaddq_v: {
11734     Int = Intrinsic::aarch64_neon_suqadd;
11735     return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
11736   }
11737   }
11738 }
11739 
11740 Value *CodeGenFunction::EmitBPFBuiltinExpr(unsigned BuiltinID,
11741                                            const CallExpr *E) {
11742   assert((BuiltinID == BPF::BI__builtin_preserve_field_info ||
11743           BuiltinID == BPF::BI__builtin_btf_type_id ||
11744           BuiltinID == BPF::BI__builtin_preserve_type_info ||
11745           BuiltinID == BPF::BI__builtin_preserve_enum_value) &&
11746          "unexpected BPF builtin");
11747 
  // A sequence number, injected into IR builtin functions, to
  // prevent CSE when the only difference between two otherwise
  // identical functions may be the debuginfo metadata.
11751   static uint32_t BuiltinSeqNum;
11752 
11753   switch (BuiltinID) {
11754   default:
11755     llvm_unreachable("Unexpected BPF builtin");
11756   case BPF::BI__builtin_preserve_field_info: {
11757     const Expr *Arg = E->getArg(0);
11758     bool IsBitField = Arg->IgnoreParens()->getObjectKind() == OK_BitField;
11759 
11760     if (!getDebugInfo()) {
11761       CGM.Error(E->getExprLoc(),
11762                 "using __builtin_preserve_field_info() without -g");
11763       return IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11764                         : EmitLValue(Arg).getPointer(*this);
11765     }
11766 
11767     // Enable underlying preserve_*_access_index() generation.
11768     bool OldIsInPreservedAIRegion = IsInPreservedAIRegion;
11769     IsInPreservedAIRegion = true;
11770     Value *FieldAddr = IsBitField ? EmitLValue(Arg).getBitFieldPointer()
11771                                   : EmitLValue(Arg).getPointer(*this);
11772     IsInPreservedAIRegion = OldIsInPreservedAIRegion;
11773 
11774     ConstantInt *C = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11775     Value *InfoKind = ConstantInt::get(Int64Ty, C->getSExtValue());
11776 
    // Build the IR for the preserve_field_info intrinsic.
11778     llvm::Function *FnGetFieldInfo = llvm::Intrinsic::getDeclaration(
11779         &CGM.getModule(), llvm::Intrinsic::bpf_preserve_field_info,
11780         {FieldAddr->getType()});
11781     return Builder.CreateCall(FnGetFieldInfo, {FieldAddr, InfoKind});
11782   }
11783   case BPF::BI__builtin_btf_type_id:
11784   case BPF::BI__builtin_preserve_type_info: {
11785     if (!getDebugInfo()) {
11786       CGM.Error(E->getExprLoc(), "using builtin function without -g");
11787       return nullptr;
11788     }
11789 
11790     const Expr *Arg0 = E->getArg(0);
11791     llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11792         Arg0->getType(), Arg0->getExprLoc());
11793 
11794     ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11795     Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11796     Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11797 
11798     llvm::Function *FnDecl;
11799     if (BuiltinID == BPF::BI__builtin_btf_type_id)
11800       FnDecl = llvm::Intrinsic::getDeclaration(
11801           &CGM.getModule(), llvm::Intrinsic::bpf_btf_type_id, {});
11802     else
11803       FnDecl = llvm::Intrinsic::getDeclaration(
11804           &CGM.getModule(), llvm::Intrinsic::bpf_preserve_type_info, {});
11805     CallInst *Fn = Builder.CreateCall(FnDecl, {SeqNumVal, FlagValue});
11806     Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11807     return Fn;
11808   }
11809   case BPF::BI__builtin_preserve_enum_value: {
11810     if (!getDebugInfo()) {
11811       CGM.Error(E->getExprLoc(), "using builtin function without -g");
11812       return nullptr;
11813     }
11814 
11815     const Expr *Arg0 = E->getArg(0);
11816     llvm::DIType *DbgInfo = getDebugInfo()->getOrCreateStandaloneType(
11817         Arg0->getType(), Arg0->getExprLoc());
11818 
    // Find the enumerator.
11820     const auto *UO = cast<UnaryOperator>(Arg0->IgnoreParens());
11821     const auto *CE = cast<CStyleCastExpr>(UO->getSubExpr());
11822     const auto *DR = cast<DeclRefExpr>(CE->getSubExpr());
11823     const auto *Enumerator = cast<EnumConstantDecl>(DR->getDecl());
11824 
11825     auto &InitVal = Enumerator->getInitVal();
11826     std::string InitValStr;
11827     if (InitVal.isNegative() || InitVal > uint64_t(INT64_MAX))
11828       InitValStr = std::to_string(InitVal.getSExtValue());
11829     else
11830       InitValStr = std::to_string(InitVal.getZExtValue());
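    // The encoded string is "<name>:<value>", e.g. "RED:2" (illustrative).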
11831     std::string EnumStr = Enumerator->getNameAsString() + ":" + InitValStr;
11832     Value *EnumStrVal = Builder.CreateGlobalStringPtr(EnumStr);
11833 
11834     ConstantInt *Flag = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
11835     Value *FlagValue = ConstantInt::get(Int64Ty, Flag->getSExtValue());
11836     Value *SeqNumVal = ConstantInt::get(Int32Ty, BuiltinSeqNum++);
11837 
11838     llvm::Function *IntrinsicFn = llvm::Intrinsic::getDeclaration(
11839         &CGM.getModule(), llvm::Intrinsic::bpf_preserve_enum_value, {});
11840     CallInst *Fn =
11841         Builder.CreateCall(IntrinsicFn, {SeqNumVal, EnumStrVal, FlagValue});
11842     Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
11843     return Fn;
11844   }
11845   }
11846 }
11847 
11848 llvm::Value *CodeGenFunction::
11849 BuildVector(ArrayRef<llvm::Value*> Ops) {
11850   assert((Ops.size() & (Ops.size() - 1)) == 0 &&
11851          "Not a power-of-two sized vector!");
11852   bool AllConstants = true;
11853   for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
11854     AllConstants &= isa<Constant>(Ops[i]);
11855 
11856   // If this is a constant vector, create a ConstantVector.
11857   if (AllConstants) {
11858     SmallVector<llvm::Constant*, 16> CstOps;
11859     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11860       CstOps.push_back(cast<Constant>(Ops[i]));
11861     return llvm::ConstantVector::get(CstOps);
11862   }
11863 
11864   // Otherwise, insertelement the values to build the vector.
11865   Value *Result = llvm::UndefValue::get(
11866       llvm::FixedVectorType::get(Ops[0]->getType(), Ops.size()));
11867 
11868   for (unsigned i = 0, e = Ops.size(); i != e; ++i)
11869     Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
11870 
11871   return Result;
11872 }
11873 
11874 // Convert the mask from an integer type to a vector of i1.
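// E.g. (illustrative, little-endian lane order): an i8 mask 0b00001011 with
// NumElts == 4 is bitcast to <8 x i1> and shuffled down to its low four
// lanes, yielding <i1 1, i1 1, i1 0, i1 1>.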
11875 static Value *getMaskVecValue(CodeGenFunction &CGF, Value *Mask,
11876                               unsigned NumElts) {
11877 
11878   auto *MaskTy = llvm::FixedVectorType::get(
11879       CGF.Builder.getInt1Ty(),
11880       cast<IntegerType>(Mask->getType())->getBitWidth());
11881   Value *MaskVec = CGF.Builder.CreateBitCast(Mask, MaskTy);
11882 
  // If we have fewer than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
11885   if (NumElts < 8) {
11886     int Indices[4];
11887     for (unsigned i = 0; i != NumElts; ++i)
11888       Indices[i] = i;
11889     MaskVec = CGF.Builder.CreateShuffleVector(MaskVec, MaskVec,
11890                                              makeArrayRef(Indices, NumElts),
11891                                              "extract");
11892   }
11893   return MaskVec;
11894 }
11895 
11896 static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11897                                  Align Alignment) {
  // Cast the pointer to the right type.
11899   Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
11900                                llvm::PointerType::getUnqual(Ops[1]->getType()));
11901 
11902   Value *MaskVec = getMaskVecValue(
11903       CGF, Ops[2],
11904       cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
11905 
11906   return CGF.Builder.CreateMaskedStore(Ops[1], Ptr, Alignment, MaskVec);
11907 }
11908 
11909 static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
11910                                 Align Alignment) {
  // Cast the pointer to the right type.
11912   llvm::Type *Ty = Ops[1]->getType();
11913   Value *Ptr =
11914       CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
11915 
11916   Value *MaskVec = getMaskVecValue(
11917       CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());
11918 
11919   return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
11920 }
11921 
11922 static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
11923                                 ArrayRef<Value *> Ops) {
11924   auto *ResultTy = cast<llvm::VectorType>(Ops[1]->getType());
  llvm::Type *EltTy = ResultTy->getElementType();

  // Cast the pointer to the element type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                         llvm::PointerType::getUnqual(EltTy));
11930 
11931   Value *MaskVec = getMaskVecValue(
11932       CGF, Ops[2], cast<FixedVectorType>(ResultTy)->getNumElements());
11933 
11934   llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_expandload,
11935                                            ResultTy);
11936   return CGF.Builder.CreateCall(F, { Ptr, MaskVec, Ops[1] });
11937 }
11938 
11939 static Value *EmitX86CompressExpand(CodeGenFunction &CGF,
11940                                     ArrayRef<Value *> Ops,
11941                                     bool IsCompress) {
11942   auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
11943 
11944   Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11945 
11946   Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress
11947                                  : Intrinsic::x86_avx512_mask_expand;
11948   llvm::Function *F = CGF.CGM.getIntrinsic(IID, ResultTy);
11949   return CGF.Builder.CreateCall(F, { Ops[0], Ops[1], MaskVec });
11950 }
11951 
11952 static Value *EmitX86CompressStore(CodeGenFunction &CGF,
11953                                    ArrayRef<Value *> Ops) {
11954   auto *ResultTy = cast<llvm::FixedVectorType>(Ops[1]->getType());
  llvm::Type *EltTy = ResultTy->getElementType();

  // Cast the pointer to the element type.
  Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
                                         llvm::PointerType::getUnqual(EltTy));
11960 
11961   Value *MaskVec = getMaskVecValue(CGF, Ops[2], ResultTy->getNumElements());
11962 
11963   llvm::Function *F = CGF.CGM.getIntrinsic(Intrinsic::masked_compressstore,
11964                                            ResultTy);
11965   return CGF.Builder.CreateCall(F, { Ops[1], Ptr, MaskVec });
11966 }
11967 
11968 static Value *EmitX86MaskLogic(CodeGenFunction &CGF, Instruction::BinaryOps Opc,
11969                               ArrayRef<Value *> Ops,
11970                               bool InvertLHS = false) {
11971   unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
11972   Value *LHS = getMaskVecValue(CGF, Ops[0], NumElts);
11973   Value *RHS = getMaskVecValue(CGF, Ops[1], NumElts);
11974 
11975   if (InvertLHS)
11976     LHS = CGF.Builder.CreateNot(LHS);
11977 
11978   return CGF.Builder.CreateBitCast(CGF.Builder.CreateBinOp(Opc, LHS, RHS),
11979                                    Ops[0]->getType());
11980 }
11981 
11982 static Value *EmitX86FunnelShift(CodeGenFunction &CGF, Value *Op0, Value *Op1,
11983                                  Value *Amt, bool IsRight) {
11984   llvm::Type *Ty = Op0->getType();
11985 
  // The amount may be a scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated modulo the bit width, and the types are
  // all power-of-2 sized, so we only care about the lowest log2 bits anyway.
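  // Per the LangRef, llvm.fshl(a, b, n) concatenates a:b, shifts the
  // double-width value left by n modulo the bit width, and returns the top
  // half; llvm.fshr shifts right and returns the bottom half.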
11989   if (Amt->getType() != Ty) {
11990     unsigned NumElts = cast<llvm::FixedVectorType>(Ty)->getNumElements();
11991     Amt = CGF.Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
11992     Amt = CGF.Builder.CreateVectorSplat(NumElts, Amt);
11993   }
11994 
11995   unsigned IID = IsRight ? Intrinsic::fshr : Intrinsic::fshl;
11996   Function *F = CGF.CGM.getIntrinsic(IID, Ty);
11997   return CGF.Builder.CreateCall(F, {Op0, Op1, Amt});
11998 }
11999 
12000 static Value *EmitX86vpcom(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
12001                            bool IsSigned) {
12002   Value *Op0 = Ops[0];
12003   Value *Op1 = Ops[1];
12004   llvm::Type *Ty = Op0->getType();
12005   uint64_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
12006 
12007   CmpInst::Predicate Pred;
12008   switch (Imm) {
12009   case 0x0:
12010     Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
12011     break;
12012   case 0x1:
12013     Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
12014     break;
12015   case 0x2:
12016     Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
12017     break;
12018   case 0x3:
12019     Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
12020     break;
12021   case 0x4:
12022     Pred = ICmpInst::ICMP_EQ;
12023     break;
12024   case 0x5:
12025     Pred = ICmpInst::ICMP_NE;
12026     break;
12027   case 0x6:
12028     return llvm::Constant::getNullValue(Ty); // FALSE
12029   case 0x7:
12030     return llvm::Constant::getAllOnesValue(Ty); // TRUE
12031   default:
12032     llvm_unreachable("Unexpected XOP vpcom/vpcomu predicate");
12033   }
12034 
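  // Sign-extend the i1 compare results so each lane becomes all-ones (true)
  // or all-zeros (false), matching the vpcom result encoding.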
12035   Value *Cmp = CGF.Builder.CreateICmp(Pred, Op0, Op1);
12036   Value *Res = CGF.Builder.CreateSExt(Cmp, Ty);
12037   return Res;
12038 }
12039 
12040 static Value *EmitX86Select(CodeGenFunction &CGF,
12041                             Value *Mask, Value *Op0, Value *Op1) {
12042 
  // If the mask is all ones, just return the first argument.
12044   if (const auto *C = dyn_cast<Constant>(Mask))
12045     if (C->isAllOnesValue())
12046       return Op0;
12047 
12048   Mask = getMaskVecValue(
12049       CGF, Mask, cast<llvm::FixedVectorType>(Op0->getType())->getNumElements());
12050 
12051   return CGF.Builder.CreateSelect(Mask, Op0, Op1);
12052 }
12053 
12054 static Value *EmitX86ScalarSelect(CodeGenFunction &CGF,
12055                                   Value *Mask, Value *Op0, Value *Op1) {
  // If the mask is all ones, just return the first argument.
12057   if (const auto *C = dyn_cast<Constant>(Mask))
12058     if (C->isAllOnesValue())
12059       return Op0;
12060 
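  // Only bit 0 of the mask matters for a scalar select: bitcast the integer
  // mask to a vector of i1 and extract the lowest element.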
12061   auto *MaskTy = llvm::FixedVectorType::get(
12062       CGF.Builder.getInt1Ty(), Mask->getType()->getIntegerBitWidth());
12063   Mask = CGF.Builder.CreateBitCast(Mask, MaskTy);
12064   Mask = CGF.Builder.CreateExtractElement(Mask, (uint64_t)0);
12065   return CGF.Builder.CreateSelect(Mask, Op0, Op1);
12066 }
12067 
12068 static Value *EmitX86MaskedCompareResult(CodeGenFunction &CGF, Value *Cmp,
12069                                          unsigned NumElts, Value *MaskIn) {
12070   if (MaskIn) {
12071     const auto *C = dyn_cast<Constant>(MaskIn);
12072     if (!C || !C->isAllOnesValue())
12073       Cmp = CGF.Builder.CreateAnd(Cmp, getMaskVecValue(CGF, MaskIn, NumElts));
12074   }
12075 
12076   if (NumElts < 8) {
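    // Pad the compare result out to 8 elements, taking the extra lanes from
    // the all-zeros second shuffle operand, so it can be bitcast to an i8.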
12077     int Indices[8];
12078     for (unsigned i = 0; i != NumElts; ++i)
12079       Indices[i] = i;
12080     for (unsigned i = NumElts; i != 8; ++i)
12081       Indices[i] = i % NumElts + NumElts;
12082     Cmp = CGF.Builder.CreateShuffleVector(
12083         Cmp, llvm::Constant::getNullValue(Cmp->getType()), Indices);
12084   }
12085 
12086   return CGF.Builder.CreateBitCast(Cmp,
12087                                    IntegerType::get(CGF.getLLVMContext(),
12088                                                     std::max(NumElts, 8U)));
12089 }
12090 
12091 static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
12092                                    bool Signed, ArrayRef<Value *> Ops) {
12093   assert((Ops.size() == 2 || Ops.size() == 4) &&
12094          "Unexpected number of arguments");
12095   unsigned NumElts =
12096       cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12097   Value *Cmp;
12098 
12099   if (CC == 3) {
12100     Cmp = Constant::getNullValue(
12101         llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
12102   } else if (CC == 7) {
12103     Cmp = Constant::getAllOnesValue(
12104         llvm::FixedVectorType::get(CGF.Builder.getInt1Ty(), NumElts));
12105   } else {
12106     ICmpInst::Predicate Pred;
12107     switch (CC) {
12108     default: llvm_unreachable("Unknown condition code");
12109     case 0: Pred = ICmpInst::ICMP_EQ;  break;
12110     case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
12111     case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
12112     case 4: Pred = ICmpInst::ICMP_NE;  break;
12113     case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
12114     case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
12115     }
12116     Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
12117   }
12118 
12119   Value *MaskIn = nullptr;
12120   if (Ops.size() == 4)
12121     MaskIn = Ops[3];
12122 
12123   return EmitX86MaskedCompareResult(CGF, Cmp, NumElts, MaskIn);
12124 }
12125 
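// Convert a vector to a mask by testing the sign bit of each element, i.e. a
// signed less-than compare against an all-zeros vector.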
12126 static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
12127   Value *Zero = Constant::getNullValue(In->getType());
12128   return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
12129 }
12130 
12131 static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
12132                                     ArrayRef<Value *> Ops, bool IsSigned) {
12133   unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
12134   llvm::Type *Ty = Ops[1]->getType();
12135 
12136   Value *Res;
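  // A rounding immediate other than _MM_FROUND_CUR_DIRECTION (4) requests
  // static rounding, which needs the target-specific intrinsic.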
12137   if (Rnd != 4) {
12138     Intrinsic::ID IID = IsSigned ? Intrinsic::x86_avx512_sitofp_round
12139                                  : Intrinsic::x86_avx512_uitofp_round;
12140     Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
12141     Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
12142   } else {
12143     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
12144     Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
12145                    : CGF.Builder.CreateUIToFP(Ops[0], Ty);
12146   }
12147 
12148   return EmitX86Select(CGF, Ops[2], Res, Ops[1]);
12149 }
12150 
12151 // Lowers X86 FMA intrinsics to IR.
12152 static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
12153                              ArrayRef<Value *> Ops, unsigned BuiltinID,
12154                              bool IsAddSub) {
12155 
12156   bool Subtract = false;
12157   Intrinsic::ID IID = Intrinsic::not_intrinsic;
12158   switch (BuiltinID) {
12159   default: break;
12160   case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
12161     Subtract = true;
12162     LLVM_FALLTHROUGH;
12163   case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
12164   case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
12165   case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
12166     IID = llvm::Intrinsic::x86_avx512fp16_vfmadd_ph_512;
12167     break;
12168   case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
12169     Subtract = true;
12170     LLVM_FALLTHROUGH;
12171   case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
12172   case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
12173   case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
12174     IID = llvm::Intrinsic::x86_avx512fp16_vfmaddsub_ph_512;
12175     break;
12176   case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
12177     Subtract = true;
12178     LLVM_FALLTHROUGH;
12179   case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
12180   case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
12181   case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
12182     IID = llvm::Intrinsic::x86_avx512_vfmadd_ps_512; break;
12183   case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
12184     Subtract = true;
12185     LLVM_FALLTHROUGH;
12186   case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
12187   case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
12188   case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
12189     IID = llvm::Intrinsic::x86_avx512_vfmadd_pd_512; break;
12190   case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12191     Subtract = true;
12192     LLVM_FALLTHROUGH;
12193   case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
12194   case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12195   case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12196     IID = llvm::Intrinsic::x86_avx512_vfmaddsub_ps_512;
12197     break;
12198   case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12199     Subtract = true;
12200     LLVM_FALLTHROUGH;
12201   case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12202   case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12203   case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12204     IID = llvm::Intrinsic::x86_avx512_vfmaddsub_pd_512;
12205     break;
12206   }
12207 
12208   Value *A = Ops[0];
12209   Value *B = Ops[1];
12210   Value *C = Ops[2];
12211 
12212   if (Subtract)
12213     C = CGF.Builder.CreateFNeg(C);
12214 
12215   Value *Res;
12216 
  // Use the target-specific intrinsic when a rounding mode other than
  // _MM_FROUND_CUR_DIRECTION/4 is requested or for add/sub variants;
  // otherwise a plain llvm.fma is sufficient.
12218   if (IID != Intrinsic::not_intrinsic &&
12219       (cast<llvm::ConstantInt>(Ops.back())->getZExtValue() != (uint64_t)4 ||
12220        IsAddSub)) {
12221     Function *Intr = CGF.CGM.getIntrinsic(IID);
12222     Res = CGF.Builder.CreateCall(Intr, {A, B, C, Ops.back() });
12223   } else {
12224     llvm::Type *Ty = A->getType();
12225     Function *FMA;
12226     if (CGF.Builder.getIsFPConstrained()) {
12227       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
12228       FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
12229       Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
12230     } else {
12231       FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
12232       Res = CGF.Builder.CreateCall(FMA, {A, B, C});
12233     }
12234   }
12235 
12236   // Handle any required masking.
12237   Value *MaskFalseVal = nullptr;
12238   switch (BuiltinID) {
12239   case clang::X86::BI__builtin_ia32_vfmaddph512_mask:
12240   case clang::X86::BI__builtin_ia32_vfmaddps512_mask:
12241   case clang::X86::BI__builtin_ia32_vfmaddpd512_mask:
12242   case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask:
12243   case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask:
12244   case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12245     MaskFalseVal = Ops[0];
12246     break;
12247   case clang::X86::BI__builtin_ia32_vfmaddph512_maskz:
12248   case clang::X86::BI__builtin_ia32_vfmaddps512_maskz:
12249   case clang::X86::BI__builtin_ia32_vfmaddpd512_maskz:
12250   case clang::X86::BI__builtin_ia32_vfmaddsubph512_maskz:
12251   case clang::X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12252   case clang::X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12253     MaskFalseVal = Constant::getNullValue(Ops[0]->getType());
12254     break;
12255   case clang::X86::BI__builtin_ia32_vfmsubph512_mask3:
12256   case clang::X86::BI__builtin_ia32_vfmaddph512_mask3:
12257   case clang::X86::BI__builtin_ia32_vfmsubps512_mask3:
12258   case clang::X86::BI__builtin_ia32_vfmaddps512_mask3:
12259   case clang::X86::BI__builtin_ia32_vfmsubpd512_mask3:
12260   case clang::X86::BI__builtin_ia32_vfmaddpd512_mask3:
12261   case clang::X86::BI__builtin_ia32_vfmsubaddph512_mask3:
12262   case clang::X86::BI__builtin_ia32_vfmaddsubph512_mask3:
12263   case clang::X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12264   case clang::X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12265   case clang::X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12266   case clang::X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12267     MaskFalseVal = Ops[2];
12268     break;
12269   }
12270 
12271   if (MaskFalseVal)
12272     return EmitX86Select(CGF, Ops[3], Res, MaskFalseVal);
12273 
12274   return Res;
12275 }
12276 
12277 static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
12278                                 MutableArrayRef<Value *> Ops, Value *Upper,
12279                                 bool ZeroMask = false, unsigned PTIdx = 0,
12280                                 bool NegAcc = false) {
12281   unsigned Rnd = 4;
12282   if (Ops.size() > 4)
12283     Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
12284 
12285   if (NegAcc)
12286     Ops[2] = CGF.Builder.CreateFNeg(Ops[2]);
12287 
12288   Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], (uint64_t)0);
12289   Ops[1] = CGF.Builder.CreateExtractElement(Ops[1], (uint64_t)0);
12290   Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], (uint64_t)0);
12291   Value *Res;
12292   if (Rnd != 4) {
12293     Intrinsic::ID IID;
12294 
12295     switch (Ops[0]->getType()->getPrimitiveSizeInBits()) {
12296     case 16:
12297       IID = Intrinsic::x86_avx512fp16_vfmadd_f16;
12298       break;
12299     case 32:
12300       IID = Intrinsic::x86_avx512_vfmadd_f32;
12301       break;
12302     case 64:
12303       IID = Intrinsic::x86_avx512_vfmadd_f64;
12304       break;
12305     default:
12306       llvm_unreachable("Unexpected size");
12307     }
12308     Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
12309                                  {Ops[0], Ops[1], Ops[2], Ops[4]});
12310   } else if (CGF.Builder.getIsFPConstrained()) {
12311     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
12312     Function *FMA = CGF.CGM.getIntrinsic(
12313         Intrinsic::experimental_constrained_fma, Ops[0]->getType());
12314     Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
12315   } else {
12316     Function *FMA = CGF.CGM.getIntrinsic(Intrinsic::fma, Ops[0]->getType());
12317     Res = CGF.Builder.CreateCall(FMA, Ops.slice(0, 3));
12318   }
12319   // If we have more than 3 arguments, we need to do masking.
12320   if (Ops.size() > 3) {
12321     Value *PassThru = ZeroMask ? Constant::getNullValue(Res->getType())
12322                                : Ops[PTIdx];
12323 
    // If we negated the accumulator and it's the PassThru value, we need to
    // bypass the negate. Conveniently, Upper should be the same thing in this
    // case.
12327     if (NegAcc && PTIdx == 2)
12328       PassThru = CGF.Builder.CreateExtractElement(Upper, (uint64_t)0);
12329 
12330     Res = EmitX86ScalarSelect(CGF, Ops[3], Res, PassThru);
12331   }
12332   return CGF.Builder.CreateInsertElement(Upper, Res, (uint64_t)0);
12333 }
12334 
12335 static Value *EmitX86Muldq(CodeGenFunction &CGF, bool IsSigned,
12336                            ArrayRef<Value *> Ops) {
12337   llvm::Type *Ty = Ops[0]->getType();
12338   // Arguments have a vXi32 type so cast to vXi64.
12339   Ty = llvm::FixedVectorType::get(CGF.Int64Ty,
12340                                   Ty->getPrimitiveSizeInBits() / 64);
12341   Value *LHS = CGF.Builder.CreateBitCast(Ops[0], Ty);
12342   Value *RHS = CGF.Builder.CreateBitCast(Ops[1], Ty);
12343 
12344   if (IsSigned) {
12345     // Shift left then arithmetic shift right.
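    // (x << 32) >>s 32 sign-extends the low 32 bits of each i64 lane.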
12346     Constant *ShiftAmt = ConstantInt::get(Ty, 32);
12347     LHS = CGF.Builder.CreateShl(LHS, ShiftAmt);
12348     LHS = CGF.Builder.CreateAShr(LHS, ShiftAmt);
12349     RHS = CGF.Builder.CreateShl(RHS, ShiftAmt);
12350     RHS = CGF.Builder.CreateAShr(RHS, ShiftAmt);
12351   } else {
12352     // Clear the upper bits.
12353     Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
12354     LHS = CGF.Builder.CreateAnd(LHS, Mask);
12355     RHS = CGF.Builder.CreateAnd(RHS, Mask);
12356   }
12357 
12358   return CGF.Builder.CreateMul(LHS, RHS);
12359 }
12360 
12361 // Emit a masked pternlog intrinsic. This only exists because the header has to
12362 // use a macro and we aren't able to pass the input argument to a pternlog
12363 // builtin and a select builtin without evaluating it twice.
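// (A macro that expanded its argument into both the pternlog builtin and the
// select builtin would evaluate any side effects in that argument twice.)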
12364 static Value *EmitX86Ternlog(CodeGenFunction &CGF, bool ZeroMask,
12365                              ArrayRef<Value *> Ops) {
12366   llvm::Type *Ty = Ops[0]->getType();
12367 
12368   unsigned VecWidth = Ty->getPrimitiveSizeInBits();
12369   unsigned EltWidth = Ty->getScalarSizeInBits();
12370   Intrinsic::ID IID;
12371   if (VecWidth == 128 && EltWidth == 32)
12372     IID = Intrinsic::x86_avx512_pternlog_d_128;
12373   else if (VecWidth == 256 && EltWidth == 32)
12374     IID = Intrinsic::x86_avx512_pternlog_d_256;
12375   else if (VecWidth == 512 && EltWidth == 32)
12376     IID = Intrinsic::x86_avx512_pternlog_d_512;
12377   else if (VecWidth == 128 && EltWidth == 64)
12378     IID = Intrinsic::x86_avx512_pternlog_q_128;
12379   else if (VecWidth == 256 && EltWidth == 64)
12380     IID = Intrinsic::x86_avx512_pternlog_q_256;
12381   else if (VecWidth == 512 && EltWidth == 64)
12382     IID = Intrinsic::x86_avx512_pternlog_q_512;
12383   else
12384     llvm_unreachable("Unexpected intrinsic");
12385 
12386   Value *Ternlog = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
12387                                           Ops.drop_back());
12388   Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty) : Ops[0];
12389   return EmitX86Select(CGF, Ops[4], Ternlog, PassThru);
12390 }
12391 
12392 static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
12393                               llvm::Type *DstTy) {
12394   unsigned NumberOfElements =
12395       cast<llvm::FixedVectorType>(DstTy)->getNumElements();
12396   Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
12397   return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
12398 }
12399 
// Emit a binary intrinsic with the same type used for the result and both
// arguments.
12401 static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
12402                                      ArrayRef<Value *> Ops, Intrinsic::ID IID) {
12403   llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
12404   return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
12405 }
12406 
12407 Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
12408   const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
12409   StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
12410   return EmitX86CpuIs(CPUStr);
12411 }
12412 
// Convert F16 halves to floats.
12414 static Value *EmitX86CvtF16ToFloatExpr(CodeGenFunction &CGF,
12415                                        ArrayRef<Value *> Ops,
12416                                        llvm::Type *DstTy) {
12417   assert((Ops.size() == 1 || Ops.size() == 3 || Ops.size() == 4) &&
12418          "Unknown cvtph2ps intrinsic");
12419 
  // If the SAE intrinsic doesn't use default rounding, then we can't upgrade.
12421   if (Ops.size() == 4 && cast<llvm::ConstantInt>(Ops[3])->getZExtValue() != 4) {
12422     Function *F =
12423         CGF.CGM.getIntrinsic(Intrinsic::x86_avx512_mask_vcvtph2ps_512);
12424     return CGF.Builder.CreateCall(F, {Ops[0], Ops[1], Ops[2], Ops[3]});
12425   }
12426 
12427   unsigned NumDstElts = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
12428   Value *Src = Ops[0];
12429 
12430   // Extract the subvector.
12431   if (NumDstElts !=
12432       cast<llvm::FixedVectorType>(Src->getType())->getNumElements()) {
12433     assert(NumDstElts == 4 && "Unexpected vector size");
12434     Src = CGF.Builder.CreateShuffleVector(Src, ArrayRef<int>{0, 1, 2, 3});
12435   }
12436 
12437   // Bitcast from vXi16 to vXf16.
12438   auto *HalfTy = llvm::FixedVectorType::get(
12439       llvm::Type::getHalfTy(CGF.getLLVMContext()), NumDstElts);
12440   Src = CGF.Builder.CreateBitCast(Src, HalfTy);
12441 
12442   // Perform the fp-extension.
12443   Value *Res = CGF.Builder.CreateFPExt(Src, DstTy, "cvtph2ps");
12444 
12445   if (Ops.size() >= 3)
12446     Res = EmitX86Select(CGF, Ops[2], Res, Ops[1]);
12447   return Res;
12448 }
12449 
12450 // Convert a BF16 to a float.
12451 static Value *EmitX86CvtBF16ToFloatExpr(CodeGenFunction &CGF,
12452                                         const CallExpr *E,
12453                                         ArrayRef<Value *> Ops) {
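  // A BF16 value holds the high 16 bits of the IEEE binary32 with the same
  // value, so zero-extend and shift left by 16 to rebuild the float's bit
  // pattern; e.g. the BF16 bits 0x3F80 (1.0) become 0x3F800000 == 1.0f.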
12454   llvm::Type *Int32Ty = CGF.Builder.getInt32Ty();
12455   Value *ZeroExt = CGF.Builder.CreateZExt(Ops[0], Int32Ty);
12456   Value *Shl = CGF.Builder.CreateShl(ZeroExt, 16);
12457   llvm::Type *ResultType = CGF.ConvertType(E->getType());
12458   Value *BitCast = CGF.Builder.CreateBitCast(Shl, ResultType);
12459   return BitCast;
12460 }
12461 
12462 Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
12463 
12464   llvm::Type *Int32Ty = Builder.getInt32Ty();
12465 
12466   // Matching the struct layout from the compiler-rt/libgcc structure that is
12467   // filled in:
12468   // unsigned int __cpu_vendor;
12469   // unsigned int __cpu_type;
12470   // unsigned int __cpu_subtype;
12471   // unsigned int __cpu_features[1];
12472   llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
12473                                           llvm::ArrayType::get(Int32Ty, 1));
12474 
12475   // Grab the global __cpu_model.
12476   llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
12477   cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
12478 
12479   // Calculate the index needed to access the correct field based on the
12480   // range. Also adjust the expected value.
12481   unsigned Index;
12482   unsigned Value;
12483   std::tie(Index, Value) = StringSwitch<std::pair<unsigned, unsigned>>(CPUStr)
12484 #define X86_VENDOR(ENUM, STRING)                                               \
12485   .Case(STRING, {0u, static_cast<unsigned>(llvm::X86::ENUM)})
12486 #define X86_CPU_TYPE_ALIAS(ENUM, ALIAS)                                        \
12487   .Case(ALIAS, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
12488 #define X86_CPU_TYPE(ENUM, STR)                                                \
12489   .Case(STR, {1u, static_cast<unsigned>(llvm::X86::ENUM)})
12490 #define X86_CPU_SUBTYPE(ENUM, STR)                                             \
12491   .Case(STR, {2u, static_cast<unsigned>(llvm::X86::ENUM)})
12492 #include "llvm/Support/X86TargetParser.def"
12493                                .Default({0, 0});
12494   assert(Value != 0 && "Invalid CPUStr passed to CpuIs");
12495 
12496   // Grab the appropriate field from __cpu_model.
12497   llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
12498                          ConstantInt::get(Int32Ty, Index)};
12499   llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
12500   CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
12501                                        CharUnits::fromQuantity(4));
12502 
12503   // Check the value of the field against the requested value.
12504   return Builder.CreateICmpEQ(CpuValue,
12505                                   llvm::ConstantInt::get(Int32Ty, Value));
12506 }
12507 
12508 Value *CodeGenFunction::EmitX86CpuSupports(const CallExpr *E) {
12509   const Expr *FeatureExpr = E->getArg(0)->IgnoreParenCasts();
12510   StringRef FeatureStr = cast<StringLiteral>(FeatureExpr)->getString();
12511   return EmitX86CpuSupports(FeatureStr);
12512 }
12513 
12514 Value *CodeGenFunction::EmitX86CpuSupports(ArrayRef<StringRef> FeatureStrs) {
12515   return EmitX86CpuSupports(llvm::X86::getCpuSupportsMask(FeatureStrs));
12516 }
12517 
12518 llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
12519   uint32_t Features1 = Lo_32(FeaturesMask);
12520   uint32_t Features2 = Hi_32(FeaturesMask);
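  // The low 32 feature bits live in __cpu_model.__cpu_features[0]; the high
  // 32 bits live in the separate __cpu_features2 global.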
12521 
12522   Value *Result = Builder.getTrue();
12523 
12524   if (Features1 != 0) {
12525     // Matching the struct layout from the compiler-rt/libgcc structure that is
12526     // filled in:
12527     // unsigned int __cpu_vendor;
12528     // unsigned int __cpu_type;
12529     // unsigned int __cpu_subtype;
12530     // unsigned int __cpu_features[1];
12531     llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty,
12532                                             llvm::ArrayType::get(Int32Ty, 1));
12533 
12534     // Grab the global __cpu_model.
12535     llvm::Constant *CpuModel = CGM.CreateRuntimeVariable(STy, "__cpu_model");
12536     cast<llvm::GlobalValue>(CpuModel)->setDSOLocal(true);
12537 
    // Grab the first (0th) element of the __cpu_features field from the
    // global __cpu_model struct of type STy.
12540     Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
12541                      Builder.getInt32(0)};
12542     Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
12543     Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures,
12544                                                 CharUnits::fromQuantity(4));
12545 
    // Check the bits corresponding to the requested features.
12547     Value *Mask = Builder.getInt32(Features1);
12548     Value *Bitset = Builder.CreateAnd(Features, Mask);
12549     Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
12550     Result = Builder.CreateAnd(Result, Cmp);
12551   }
12552 
12553   if (Features2 != 0) {
12554     llvm::Constant *CpuFeatures2 = CGM.CreateRuntimeVariable(Int32Ty,
12555                                                              "__cpu_features2");
12556     cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
12557 
12558     Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2,
12559                                                 CharUnits::fromQuantity(4));
12560 
    // Check the bits corresponding to the requested features.
12562     Value *Mask = Builder.getInt32(Features2);
12563     Value *Bitset = Builder.CreateAnd(Features, Mask);
12564     Value *Cmp = Builder.CreateICmpEQ(Bitset, Mask);
12565     Result = Builder.CreateAnd(Result, Cmp);
12566   }
12567 
12568   return Result;
12569 }
12570 
12571 Value *CodeGenFunction::EmitX86CpuInit() {
12572   llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy,
12573                                                     /*Variadic*/ false);
12574   llvm::FunctionCallee Func =
12575       CGM.CreateRuntimeFunction(FTy, "__cpu_indicator_init");
12576   cast<llvm::GlobalValue>(Func.getCallee())->setDSOLocal(true);
12577   cast<llvm::GlobalValue>(Func.getCallee())
12578       ->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
12579   return Builder.CreateCall(Func);
12580 }
12581 
12582 Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
12583                                            const CallExpr *E) {
12584   if (BuiltinID == X86::BI__builtin_cpu_is)
12585     return EmitX86CpuIs(E);
12586   if (BuiltinID == X86::BI__builtin_cpu_supports)
12587     return EmitX86CpuSupports(E);
12588   if (BuiltinID == X86::BI__builtin_cpu_init)
12589     return EmitX86CpuInit();
12590 
12591   // Handle MSVC intrinsics before argument evaluation to prevent double
12592   // evaluation.
12593   if (Optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
12594     return EmitMSVCBuiltinExpr(*MsvcIntId, E);
12595 
12596   SmallVector<Value*, 4> Ops;
12597   bool IsMaskFCmp = false;
12598   bool IsConjFMA = false;
12599 
12600   // Find out if any arguments are required to be integer constant expressions.
12601   unsigned ICEArguments = 0;
12602   ASTContext::GetBuiltinTypeError Error;
12603   getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
12604   assert(Error == ASTContext::GE_None && "Should not codegen an error");
12605 
12606   for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
12607     // If this is a normal argument, just emit it as a scalar.
12608     if ((ICEArguments & (1 << i)) == 0) {
12609       Ops.push_back(EmitScalarExpr(E->getArg(i)));
12610       continue;
12611     }
12612 
12613     // If this is required to be a constant, constant fold it so that we know
12614     // that the generated intrinsic gets a ConstantInt.
12615     Ops.push_back(llvm::ConstantInt::get(
12616         getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
12617   }
12618 
  // These exist so that the builtin that takes an immediate can be bounds
  // checked by clang to avoid passing bad immediates to the backend. Since
  // AVX has a larger immediate than SSE, we would need separate builtins to
  // do the different bounds checking. Rather than create a clang-specific
  // SSE-only builtin, this implements eight separate builtins to match the
  // gcc implementation.
12625   auto getCmpIntrinsicCall = [this, &Ops](Intrinsic::ID ID, unsigned Imm) {
12626     Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
12627     llvm::Function *F = CGM.getIntrinsic(ID);
12628     return Builder.CreateCall(F, Ops);
12629   };
12630 
12631   // For the vector forms of FP comparisons, translate the builtins directly to
12632   // IR.
12633   // TODO: The builtins could be removed if the SSE header files used vector
12634   // extension comparisons directly (vector ordered/unordered may need
12635   // additional support via __builtin_isnan()).
12636   auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred,
12637                                          bool IsSignaling) {
12638     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
12639     Value *Cmp;
12640     if (IsSignaling)
12641       Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
12642     else
12643       Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
12644     llvm::VectorType *FPVecTy = cast<llvm::VectorType>(Ops[0]->getType());
12645     llvm::VectorType *IntVecTy = llvm::VectorType::getInteger(FPVecTy);
12646     Value *Sext = Builder.CreateSExt(Cmp, IntVecTy);
12647     return Builder.CreateBitCast(Sext, FPVecTy);
12648   };
12649 
12650   switch (BuiltinID) {
12651   default: return nullptr;
12652   case X86::BI_mm_prefetch: {
12653     Value *Address = Ops[0];
12654     ConstantInt *C = cast<ConstantInt>(Ops[1]);
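    // The hint immediate encodes the prefetch kind: bit 2 selects a read (0)
    // or write (1) prefetch, and bits 1:0 give the temporal locality level.
    // The trailing constant 1 selects a data (rather than instruction) cache
    // prefetch in llvm.prefetch.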
12655     Value *RW = ConstantInt::get(Int32Ty, (C->getZExtValue() >> 2) & 0x1);
12656     Value *Locality = ConstantInt::get(Int32Ty, C->getZExtValue() & 0x3);
12657     Value *Data = ConstantInt::get(Int32Ty, 1);
12658     Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
12659     return Builder.CreateCall(F, {Address, RW, Locality, Data});
12660   }
12661   case X86::BI_mm_clflush: {
12662     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
12663                               Ops[0]);
12664   }
12665   case X86::BI_mm_lfence: {
12666     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
12667   }
12668   case X86::BI_mm_mfence: {
12669     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
12670   }
12671   case X86::BI_mm_sfence: {
12672     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
12673   }
12674   case X86::BI_mm_pause: {
12675     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
12676   }
12677   case X86::BI__rdtsc: {
12678     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
12679   }
12680   case X86::BI__builtin_ia32_rdtscp: {
12681     Value *Call = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtscp));
12682     Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
12683                                       Ops[0]);
12684     return Builder.CreateExtractValue(Call, 0);
12685   }
12686   case X86::BI__builtin_ia32_lzcnt_u16:
12687   case X86::BI__builtin_ia32_lzcnt_u32:
12688   case X86::BI__builtin_ia32_lzcnt_u64: {
12689     Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
12690     return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12691   }
12692   case X86::BI__builtin_ia32_tzcnt_u16:
12693   case X86::BI__builtin_ia32_tzcnt_u32:
12694   case X86::BI__builtin_ia32_tzcnt_u64: {
12695     Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
12696     return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
12697   }
12698   case X86::BI__builtin_ia32_undef128:
12699   case X86::BI__builtin_ia32_undef256:
12700   case X86::BI__builtin_ia32_undef512:
12701     // The x86 definition of "undef" is not the same as the LLVM definition
12702     // (PR32176). We leave optimizing away an unnecessary zero constant to the
12703     // IR optimizer and backend.
12704     // TODO: If we had a "freeze" IR instruction to generate a fixed undef
12705     // value, we should use that here instead of a zero.
12706     return llvm::Constant::getNullValue(ConvertType(E->getType()));
12707   case X86::BI__builtin_ia32_vec_init_v8qi:
12708   case X86::BI__builtin_ia32_vec_init_v4hi:
12709   case X86::BI__builtin_ia32_vec_init_v2si:
12710     return Builder.CreateBitCast(BuildVector(Ops),
12711                                  llvm::Type::getX86_MMXTy(getLLVMContext()));
12712   case X86::BI__builtin_ia32_vec_ext_v2si:
12713   case X86::BI__builtin_ia32_vec_ext_v16qi:
12714   case X86::BI__builtin_ia32_vec_ext_v8hi:
12715   case X86::BI__builtin_ia32_vec_ext_v4si:
12716   case X86::BI__builtin_ia32_vec_ext_v4sf:
12717   case X86::BI__builtin_ia32_vec_ext_v2di:
12718   case X86::BI__builtin_ia32_vec_ext_v32qi:
12719   case X86::BI__builtin_ia32_vec_ext_v16hi:
12720   case X86::BI__builtin_ia32_vec_ext_v8si:
12721   case X86::BI__builtin_ia32_vec_ext_v4di: {
12722     unsigned NumElts =
12723         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12724     uint64_t Index = cast<ConstantInt>(Ops[1])->getZExtValue();
12725     Index &= NumElts - 1;
12726     // These builtins exist so we can ensure the index is an ICE and in range.
12727     // Otherwise we could just do this in the header file.
12728     return Builder.CreateExtractElement(Ops[0], Index);
12729   }
12730   case X86::BI__builtin_ia32_vec_set_v16qi:
12731   case X86::BI__builtin_ia32_vec_set_v8hi:
12732   case X86::BI__builtin_ia32_vec_set_v4si:
12733   case X86::BI__builtin_ia32_vec_set_v2di:
12734   case X86::BI__builtin_ia32_vec_set_v32qi:
12735   case X86::BI__builtin_ia32_vec_set_v16hi:
12736   case X86::BI__builtin_ia32_vec_set_v8si:
12737   case X86::BI__builtin_ia32_vec_set_v4di: {
12738     unsigned NumElts =
12739         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
12740     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
12741     Index &= NumElts - 1;
12742     // These builtins exist so we can ensure the index is an ICE and in range.
12743     // Otherwise we could just do this in the header file.
12744     return Builder.CreateInsertElement(Ops[0], Ops[1], Index);
12745   }
12746   case X86::BI_mm_setcsr:
12747   case X86::BI__builtin_ia32_ldmxcsr: {
12748     Address Tmp = CreateMemTemp(E->getArg(0)->getType());
12749     Builder.CreateStore(Ops[0], Tmp);
12750     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
12751                           Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12752   }
12753   case X86::BI_mm_getcsr:
12754   case X86::BI__builtin_ia32_stmxcsr: {
12755     Address Tmp = CreateMemTemp(E->getType());
12756     Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
12757                        Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
12758     return Builder.CreateLoad(Tmp, "stmxcsr");
12759   }
12760   case X86::BI__builtin_ia32_xsave:
12761   case X86::BI__builtin_ia32_xsave64:
12762   case X86::BI__builtin_ia32_xrstor:
12763   case X86::BI__builtin_ia32_xrstor64:
12764   case X86::BI__builtin_ia32_xsaveopt:
12765   case X86::BI__builtin_ia32_xsaveopt64:
12766   case X86::BI__builtin_ia32_xrstors:
12767   case X86::BI__builtin_ia32_xrstors64:
12768   case X86::BI__builtin_ia32_xsavec:
12769   case X86::BI__builtin_ia32_xsavec64:
12770   case X86::BI__builtin_ia32_xsaves:
12771   case X86::BI__builtin_ia32_xsaves64:
12772   case X86::BI__builtin_ia32_xsetbv:
12773   case X86::BI_xsetbv: {
12774     Intrinsic::ID ID;
12775 #define INTRINSIC_X86_XSAVE_ID(NAME) \
12776     case X86::BI__builtin_ia32_##NAME: \
12777       ID = Intrinsic::x86_##NAME; \
12778       break
12779     switch (BuiltinID) {
12780     default: llvm_unreachable("Unsupported intrinsic!");
12781     INTRINSIC_X86_XSAVE_ID(xsave);
12782     INTRINSIC_X86_XSAVE_ID(xsave64);
12783     INTRINSIC_X86_XSAVE_ID(xrstor);
12784     INTRINSIC_X86_XSAVE_ID(xrstor64);
12785     INTRINSIC_X86_XSAVE_ID(xsaveopt);
12786     INTRINSIC_X86_XSAVE_ID(xsaveopt64);
12787     INTRINSIC_X86_XSAVE_ID(xrstors);
12788     INTRINSIC_X86_XSAVE_ID(xrstors64);
12789     INTRINSIC_X86_XSAVE_ID(xsavec);
12790     INTRINSIC_X86_XSAVE_ID(xsavec64);
12791     INTRINSIC_X86_XSAVE_ID(xsaves);
12792     INTRINSIC_X86_XSAVE_ID(xsaves64);
12793     INTRINSIC_X86_XSAVE_ID(xsetbv);
12794     case X86::BI_xsetbv:
12795       ID = Intrinsic::x86_xsetbv;
12796       break;
12797     }
12798 #undef INTRINSIC_X86_XSAVE_ID
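    // Split the 64-bit mask into the EDX:EAX (high:low) i32 halves that the
    // XSAVE-family and XSETBV intrinsics expect.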
12799     Value *Mhi = Builder.CreateTrunc(
12800       Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
12801     Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
12802     Ops[1] = Mhi;
12803     Ops.push_back(Mlo);
12804     return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
12805   }
12806   case X86::BI__builtin_ia32_xgetbv:
12807   case X86::BI_xgetbv:
12808     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_xgetbv), Ops);
12809   case X86::BI__builtin_ia32_storedqudi128_mask:
12810   case X86::BI__builtin_ia32_storedqusi128_mask:
12811   case X86::BI__builtin_ia32_storedquhi128_mask:
12812   case X86::BI__builtin_ia32_storedquqi128_mask:
12813   case X86::BI__builtin_ia32_storeupd128_mask:
12814   case X86::BI__builtin_ia32_storeups128_mask:
12815   case X86::BI__builtin_ia32_storedqudi256_mask:
12816   case X86::BI__builtin_ia32_storedqusi256_mask:
12817   case X86::BI__builtin_ia32_storedquhi256_mask:
12818   case X86::BI__builtin_ia32_storedquqi256_mask:
12819   case X86::BI__builtin_ia32_storeupd256_mask:
12820   case X86::BI__builtin_ia32_storeups256_mask:
12821   case X86::BI__builtin_ia32_storedqudi512_mask:
12822   case X86::BI__builtin_ia32_storedqusi512_mask:
12823   case X86::BI__builtin_ia32_storedquhi512_mask:
12824   case X86::BI__builtin_ia32_storedquqi512_mask:
12825   case X86::BI__builtin_ia32_storeupd512_mask:
12826   case X86::BI__builtin_ia32_storeups512_mask:
12827     return EmitX86MaskedStore(*this, Ops, Align(1));
12828 
12829   case X86::BI__builtin_ia32_storesh128_mask:
12830   case X86::BI__builtin_ia32_storess128_mask:
12831   case X86::BI__builtin_ia32_storesd128_mask:
12832     return EmitX86MaskedStore(*this, Ops, Align(1));
12833 
12834   case X86::BI__builtin_ia32_vpopcntb_128:
12835   case X86::BI__builtin_ia32_vpopcntd_128:
12836   case X86::BI__builtin_ia32_vpopcntq_128:
12837   case X86::BI__builtin_ia32_vpopcntw_128:
12838   case X86::BI__builtin_ia32_vpopcntb_256:
12839   case X86::BI__builtin_ia32_vpopcntd_256:
12840   case X86::BI__builtin_ia32_vpopcntq_256:
12841   case X86::BI__builtin_ia32_vpopcntw_256:
12842   case X86::BI__builtin_ia32_vpopcntb_512:
12843   case X86::BI__builtin_ia32_vpopcntd_512:
12844   case X86::BI__builtin_ia32_vpopcntq_512:
12845   case X86::BI__builtin_ia32_vpopcntw_512: {
12846     llvm::Type *ResultType = ConvertType(E->getType());
12847     llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
12848     return Builder.CreateCall(F, Ops);
12849   }
12850   case X86::BI__builtin_ia32_cvtmask2b128:
12851   case X86::BI__builtin_ia32_cvtmask2b256:
12852   case X86::BI__builtin_ia32_cvtmask2b512:
12853   case X86::BI__builtin_ia32_cvtmask2w128:
12854   case X86::BI__builtin_ia32_cvtmask2w256:
12855   case X86::BI__builtin_ia32_cvtmask2w512:
12856   case X86::BI__builtin_ia32_cvtmask2d128:
12857   case X86::BI__builtin_ia32_cvtmask2d256:
12858   case X86::BI__builtin_ia32_cvtmask2d512:
12859   case X86::BI__builtin_ia32_cvtmask2q128:
12860   case X86::BI__builtin_ia32_cvtmask2q256:
12861   case X86::BI__builtin_ia32_cvtmask2q512:
12862     return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
12863 
12864   case X86::BI__builtin_ia32_cvtb2mask128:
12865   case X86::BI__builtin_ia32_cvtb2mask256:
12866   case X86::BI__builtin_ia32_cvtb2mask512:
12867   case X86::BI__builtin_ia32_cvtw2mask128:
12868   case X86::BI__builtin_ia32_cvtw2mask256:
12869   case X86::BI__builtin_ia32_cvtw2mask512:
12870   case X86::BI__builtin_ia32_cvtd2mask128:
12871   case X86::BI__builtin_ia32_cvtd2mask256:
12872   case X86::BI__builtin_ia32_cvtd2mask512:
12873   case X86::BI__builtin_ia32_cvtq2mask128:
12874   case X86::BI__builtin_ia32_cvtq2mask256:
12875   case X86::BI__builtin_ia32_cvtq2mask512:
12876     return EmitX86ConvertToMask(*this, Ops[0]);
12877 
12878   case X86::BI__builtin_ia32_cvtdq2ps512_mask:
12879   case X86::BI__builtin_ia32_cvtqq2ps512_mask:
12880   case X86::BI__builtin_ia32_cvtqq2pd512_mask:
12881   case X86::BI__builtin_ia32_vcvtw2ph512_mask:
12882   case X86::BI__builtin_ia32_vcvtdq2ph512_mask:
12883   case X86::BI__builtin_ia32_vcvtqq2ph512_mask:
12884     return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true);
12885   case X86::BI__builtin_ia32_cvtudq2ps512_mask:
12886   case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
12887   case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
12888   case X86::BI__builtin_ia32_vcvtuw2ph512_mask:
12889   case X86::BI__builtin_ia32_vcvtudq2ph512_mask:
12890   case X86::BI__builtin_ia32_vcvtuqq2ph512_mask:
12891     return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false);
12892 
12893   case X86::BI__builtin_ia32_vfmaddss3:
12894   case X86::BI__builtin_ia32_vfmaddsd3:
12895   case X86::BI__builtin_ia32_vfmaddsh3_mask:
12896   case X86::BI__builtin_ia32_vfmaddss3_mask:
12897   case X86::BI__builtin_ia32_vfmaddsd3_mask:
12898     return EmitScalarFMAExpr(*this, E, Ops, Ops[0]);
12899   case X86::BI__builtin_ia32_vfmaddss:
12900   case X86::BI__builtin_ia32_vfmaddsd:
12901     return EmitScalarFMAExpr(*this, E, Ops,
12902                              Constant::getNullValue(Ops[0]->getType()));
12903   case X86::BI__builtin_ia32_vfmaddsh3_maskz:
12904   case X86::BI__builtin_ia32_vfmaddss3_maskz:
12905   case X86::BI__builtin_ia32_vfmaddsd3_maskz:
12906     return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true);
12907   case X86::BI__builtin_ia32_vfmaddsh3_mask3:
12908   case X86::BI__builtin_ia32_vfmaddss3_mask3:
12909   case X86::BI__builtin_ia32_vfmaddsd3_mask3:
12910     return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2);
12911   case X86::BI__builtin_ia32_vfmsubsh3_mask3:
12912   case X86::BI__builtin_ia32_vfmsubss3_mask3:
12913   case X86::BI__builtin_ia32_vfmsubsd3_mask3:
12914     return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
12915                              /*NegAcc*/ true);
12916   case X86::BI__builtin_ia32_vfmaddph:
12917   case X86::BI__builtin_ia32_vfmaddps:
12918   case X86::BI__builtin_ia32_vfmaddpd:
12919   case X86::BI__builtin_ia32_vfmaddph256:
12920   case X86::BI__builtin_ia32_vfmaddps256:
12921   case X86::BI__builtin_ia32_vfmaddpd256:
12922   case X86::BI__builtin_ia32_vfmaddph512_mask:
12923   case X86::BI__builtin_ia32_vfmaddph512_maskz:
12924   case X86::BI__builtin_ia32_vfmaddph512_mask3:
12925   case X86::BI__builtin_ia32_vfmaddps512_mask:
12926   case X86::BI__builtin_ia32_vfmaddps512_maskz:
12927   case X86::BI__builtin_ia32_vfmaddps512_mask3:
12928   case X86::BI__builtin_ia32_vfmsubps512_mask3:
12929   case X86::BI__builtin_ia32_vfmaddpd512_mask:
12930   case X86::BI__builtin_ia32_vfmaddpd512_maskz:
12931   case X86::BI__builtin_ia32_vfmaddpd512_mask3:
12932   case X86::BI__builtin_ia32_vfmsubpd512_mask3:
12933   case X86::BI__builtin_ia32_vfmsubph512_mask3:
12934     return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false);
12935   case X86::BI__builtin_ia32_vfmaddsubph512_mask:
12936   case X86::BI__builtin_ia32_vfmaddsubph512_maskz:
12937   case X86::BI__builtin_ia32_vfmaddsubph512_mask3:
12938   case X86::BI__builtin_ia32_vfmsubaddph512_mask3:
12939   case X86::BI__builtin_ia32_vfmaddsubps512_mask:
12940   case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
12941   case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
12942   case X86::BI__builtin_ia32_vfmsubaddps512_mask3:
12943   case X86::BI__builtin_ia32_vfmaddsubpd512_mask:
12944   case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
12945   case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
12946   case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
12947     return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true);
12948 
12949   case X86::BI__builtin_ia32_movdqa32store128_mask:
12950   case X86::BI__builtin_ia32_movdqa64store128_mask:
12951   case X86::BI__builtin_ia32_storeaps128_mask:
12952   case X86::BI__builtin_ia32_storeapd128_mask:
12953   case X86::BI__builtin_ia32_movdqa32store256_mask:
12954   case X86::BI__builtin_ia32_movdqa64store256_mask:
12955   case X86::BI__builtin_ia32_storeaps256_mask:
12956   case X86::BI__builtin_ia32_storeapd256_mask:
12957   case X86::BI__builtin_ia32_movdqa32store512_mask:
12958   case X86::BI__builtin_ia32_movdqa64store512_mask:
12959   case X86::BI__builtin_ia32_storeaps512_mask:
12960   case X86::BI__builtin_ia32_storeapd512_mask:
12961     return EmitX86MaskedStore(
12962         *this, Ops,
12963         getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
12964 
12965   case X86::BI__builtin_ia32_loadups128_mask:
12966   case X86::BI__builtin_ia32_loadups256_mask:
12967   case X86::BI__builtin_ia32_loadups512_mask:
12968   case X86::BI__builtin_ia32_loadupd128_mask:
12969   case X86::BI__builtin_ia32_loadupd256_mask:
12970   case X86::BI__builtin_ia32_loadupd512_mask:
12971   case X86::BI__builtin_ia32_loaddquqi128_mask:
12972   case X86::BI__builtin_ia32_loaddquqi256_mask:
12973   case X86::BI__builtin_ia32_loaddquqi512_mask:
12974   case X86::BI__builtin_ia32_loaddquhi128_mask:
12975   case X86::BI__builtin_ia32_loaddquhi256_mask:
12976   case X86::BI__builtin_ia32_loaddquhi512_mask:
12977   case X86::BI__builtin_ia32_loaddqusi128_mask:
12978   case X86::BI__builtin_ia32_loaddqusi256_mask:
12979   case X86::BI__builtin_ia32_loaddqusi512_mask:
12980   case X86::BI__builtin_ia32_loaddqudi128_mask:
12981   case X86::BI__builtin_ia32_loaddqudi256_mask:
12982   case X86::BI__builtin_ia32_loaddqudi512_mask:
12983     return EmitX86MaskedLoad(*this, Ops, Align(1));
12984 
12985   case X86::BI__builtin_ia32_loadsh128_mask:
12986   case X86::BI__builtin_ia32_loadss128_mask:
12987   case X86::BI__builtin_ia32_loadsd128_mask:
12988     return EmitX86MaskedLoad(*this, Ops, Align(1));
12989 
12990   case X86::BI__builtin_ia32_loadaps128_mask:
12991   case X86::BI__builtin_ia32_loadaps256_mask:
12992   case X86::BI__builtin_ia32_loadaps512_mask:
12993   case X86::BI__builtin_ia32_loadapd128_mask:
12994   case X86::BI__builtin_ia32_loadapd256_mask:
12995   case X86::BI__builtin_ia32_loadapd512_mask:
12996   case X86::BI__builtin_ia32_movdqa32load128_mask:
12997   case X86::BI__builtin_ia32_movdqa32load256_mask:
12998   case X86::BI__builtin_ia32_movdqa32load512_mask:
12999   case X86::BI__builtin_ia32_movdqa64load128_mask:
13000   case X86::BI__builtin_ia32_movdqa64load256_mask:
13001   case X86::BI__builtin_ia32_movdqa64load512_mask:
13002     return EmitX86MaskedLoad(
13003         *this, Ops,
13004         getContext().getTypeAlignInChars(E->getArg(1)->getType()).getAsAlign());
13005 
13006   case X86::BI__builtin_ia32_expandloaddf128_mask:
13007   case X86::BI__builtin_ia32_expandloaddf256_mask:
13008   case X86::BI__builtin_ia32_expandloaddf512_mask:
13009   case X86::BI__builtin_ia32_expandloadsf128_mask:
13010   case X86::BI__builtin_ia32_expandloadsf256_mask:
13011   case X86::BI__builtin_ia32_expandloadsf512_mask:
13012   case X86::BI__builtin_ia32_expandloaddi128_mask:
13013   case X86::BI__builtin_ia32_expandloaddi256_mask:
13014   case X86::BI__builtin_ia32_expandloaddi512_mask:
13015   case X86::BI__builtin_ia32_expandloadsi128_mask:
13016   case X86::BI__builtin_ia32_expandloadsi256_mask:
13017   case X86::BI__builtin_ia32_expandloadsi512_mask:
13018   case X86::BI__builtin_ia32_expandloadhi128_mask:
13019   case X86::BI__builtin_ia32_expandloadhi256_mask:
13020   case X86::BI__builtin_ia32_expandloadhi512_mask:
13021   case X86::BI__builtin_ia32_expandloadqi128_mask:
13022   case X86::BI__builtin_ia32_expandloadqi256_mask:
13023   case X86::BI__builtin_ia32_expandloadqi512_mask:
13024     return EmitX86ExpandLoad(*this, Ops);
13025 
13026   case X86::BI__builtin_ia32_compressstoredf128_mask:
13027   case X86::BI__builtin_ia32_compressstoredf256_mask:
13028   case X86::BI__builtin_ia32_compressstoredf512_mask:
13029   case X86::BI__builtin_ia32_compressstoresf128_mask:
13030   case X86::BI__builtin_ia32_compressstoresf256_mask:
13031   case X86::BI__builtin_ia32_compressstoresf512_mask:
13032   case X86::BI__builtin_ia32_compressstoredi128_mask:
13033   case X86::BI__builtin_ia32_compressstoredi256_mask:
13034   case X86::BI__builtin_ia32_compressstoredi512_mask:
13035   case X86::BI__builtin_ia32_compressstoresi128_mask:
13036   case X86::BI__builtin_ia32_compressstoresi256_mask:
13037   case X86::BI__builtin_ia32_compressstoresi512_mask:
13038   case X86::BI__builtin_ia32_compressstorehi128_mask:
13039   case X86::BI__builtin_ia32_compressstorehi256_mask:
13040   case X86::BI__builtin_ia32_compressstorehi512_mask:
13041   case X86::BI__builtin_ia32_compressstoreqi128_mask:
13042   case X86::BI__builtin_ia32_compressstoreqi256_mask:
13043   case X86::BI__builtin_ia32_compressstoreqi512_mask:
13044     return EmitX86CompressStore(*this, Ops);
13045 
13046   case X86::BI__builtin_ia32_expanddf128_mask:
13047   case X86::BI__builtin_ia32_expanddf256_mask:
13048   case X86::BI__builtin_ia32_expanddf512_mask:
13049   case X86::BI__builtin_ia32_expandsf128_mask:
13050   case X86::BI__builtin_ia32_expandsf256_mask:
13051   case X86::BI__builtin_ia32_expandsf512_mask:
13052   case X86::BI__builtin_ia32_expanddi128_mask:
13053   case X86::BI__builtin_ia32_expanddi256_mask:
13054   case X86::BI__builtin_ia32_expanddi512_mask:
13055   case X86::BI__builtin_ia32_expandsi128_mask:
13056   case X86::BI__builtin_ia32_expandsi256_mask:
13057   case X86::BI__builtin_ia32_expandsi512_mask:
13058   case X86::BI__builtin_ia32_expandhi128_mask:
13059   case X86::BI__builtin_ia32_expandhi256_mask:
13060   case X86::BI__builtin_ia32_expandhi512_mask:
13061   case X86::BI__builtin_ia32_expandqi128_mask:
13062   case X86::BI__builtin_ia32_expandqi256_mask:
13063   case X86::BI__builtin_ia32_expandqi512_mask:
13064     return EmitX86CompressExpand(*this, Ops, /*IsCompress*/false);
13065 
13066   case X86::BI__builtin_ia32_compressdf128_mask:
13067   case X86::BI__builtin_ia32_compressdf256_mask:
13068   case X86::BI__builtin_ia32_compressdf512_mask:
13069   case X86::BI__builtin_ia32_compresssf128_mask:
13070   case X86::BI__builtin_ia32_compresssf256_mask:
13071   case X86::BI__builtin_ia32_compresssf512_mask:
13072   case X86::BI__builtin_ia32_compressdi128_mask:
13073   case X86::BI__builtin_ia32_compressdi256_mask:
13074   case X86::BI__builtin_ia32_compressdi512_mask:
13075   case X86::BI__builtin_ia32_compresssi128_mask:
13076   case X86::BI__builtin_ia32_compresssi256_mask:
13077   case X86::BI__builtin_ia32_compresssi512_mask:
13078   case X86::BI__builtin_ia32_compresshi128_mask:
13079   case X86::BI__builtin_ia32_compresshi256_mask:
13080   case X86::BI__builtin_ia32_compresshi512_mask:
13081   case X86::BI__builtin_ia32_compressqi128_mask:
13082   case X86::BI__builtin_ia32_compressqi256_mask:
13083   case X86::BI__builtin_ia32_compressqi512_mask:
13084     return EmitX86CompressExpand(*this, Ops, /*IsCompress*/true);
13085 
13086   case X86::BI__builtin_ia32_gather3div2df:
13087   case X86::BI__builtin_ia32_gather3div2di:
13088   case X86::BI__builtin_ia32_gather3div4df:
13089   case X86::BI__builtin_ia32_gather3div4di:
13090   case X86::BI__builtin_ia32_gather3div4sf:
13091   case X86::BI__builtin_ia32_gather3div4si:
13092   case X86::BI__builtin_ia32_gather3div8sf:
13093   case X86::BI__builtin_ia32_gather3div8si:
13094   case X86::BI__builtin_ia32_gather3siv2df:
13095   case X86::BI__builtin_ia32_gather3siv2di:
13096   case X86::BI__builtin_ia32_gather3siv4df:
13097   case X86::BI__builtin_ia32_gather3siv4di:
13098   case X86::BI__builtin_ia32_gather3siv4sf:
13099   case X86::BI__builtin_ia32_gather3siv4si:
13100   case X86::BI__builtin_ia32_gather3siv8sf:
13101   case X86::BI__builtin_ia32_gather3siv8si:
13102   case X86::BI__builtin_ia32_gathersiv8df:
13103   case X86::BI__builtin_ia32_gathersiv16sf:
13104   case X86::BI__builtin_ia32_gatherdiv8df:
13105   case X86::BI__builtin_ia32_gatherdiv16sf:
13106   case X86::BI__builtin_ia32_gathersiv8di:
13107   case X86::BI__builtin_ia32_gathersiv16si:
13108   case X86::BI__builtin_ia32_gatherdiv8di:
13109   case X86::BI__builtin_ia32_gatherdiv16si: {
13110     Intrinsic::ID IID;
13111     switch (BuiltinID) {
13112     default: llvm_unreachable("Unexpected builtin");
13113     case X86::BI__builtin_ia32_gather3div2df:
13114       IID = Intrinsic::x86_avx512_mask_gather3div2_df;
13115       break;
13116     case X86::BI__builtin_ia32_gather3div2di:
13117       IID = Intrinsic::x86_avx512_mask_gather3div2_di;
13118       break;
13119     case X86::BI__builtin_ia32_gather3div4df:
13120       IID = Intrinsic::x86_avx512_mask_gather3div4_df;
13121       break;
13122     case X86::BI__builtin_ia32_gather3div4di:
13123       IID = Intrinsic::x86_avx512_mask_gather3div4_di;
13124       break;
13125     case X86::BI__builtin_ia32_gather3div4sf:
13126       IID = Intrinsic::x86_avx512_mask_gather3div4_sf;
13127       break;
13128     case X86::BI__builtin_ia32_gather3div4si:
13129       IID = Intrinsic::x86_avx512_mask_gather3div4_si;
13130       break;
13131     case X86::BI__builtin_ia32_gather3div8sf:
13132       IID = Intrinsic::x86_avx512_mask_gather3div8_sf;
13133       break;
13134     case X86::BI__builtin_ia32_gather3div8si:
13135       IID = Intrinsic::x86_avx512_mask_gather3div8_si;
13136       break;
13137     case X86::BI__builtin_ia32_gather3siv2df:
13138       IID = Intrinsic::x86_avx512_mask_gather3siv2_df;
13139       break;
13140     case X86::BI__builtin_ia32_gather3siv2di:
13141       IID = Intrinsic::x86_avx512_mask_gather3siv2_di;
13142       break;
13143     case X86::BI__builtin_ia32_gather3siv4df:
13144       IID = Intrinsic::x86_avx512_mask_gather3siv4_df;
13145       break;
13146     case X86::BI__builtin_ia32_gather3siv4di:
13147       IID = Intrinsic::x86_avx512_mask_gather3siv4_di;
13148       break;
13149     case X86::BI__builtin_ia32_gather3siv4sf:
13150       IID = Intrinsic::x86_avx512_mask_gather3siv4_sf;
13151       break;
13152     case X86::BI__builtin_ia32_gather3siv4si:
13153       IID = Intrinsic::x86_avx512_mask_gather3siv4_si;
13154       break;
13155     case X86::BI__builtin_ia32_gather3siv8sf:
13156       IID = Intrinsic::x86_avx512_mask_gather3siv8_sf;
13157       break;
13158     case X86::BI__builtin_ia32_gather3siv8si:
13159       IID = Intrinsic::x86_avx512_mask_gather3siv8_si;
13160       break;
13161     case X86::BI__builtin_ia32_gathersiv8df:
13162       IID = Intrinsic::x86_avx512_mask_gather_dpd_512;
13163       break;
13164     case X86::BI__builtin_ia32_gathersiv16sf:
13165       IID = Intrinsic::x86_avx512_mask_gather_dps_512;
13166       break;
13167     case X86::BI__builtin_ia32_gatherdiv8df:
13168       IID = Intrinsic::x86_avx512_mask_gather_qpd_512;
13169       break;
13170     case X86::BI__builtin_ia32_gatherdiv16sf:
13171       IID = Intrinsic::x86_avx512_mask_gather_qps_512;
13172       break;
13173     case X86::BI__builtin_ia32_gathersiv8di:
13174       IID = Intrinsic::x86_avx512_mask_gather_dpq_512;
13175       break;
13176     case X86::BI__builtin_ia32_gathersiv16si:
13177       IID = Intrinsic::x86_avx512_mask_gather_dpi_512;
13178       break;
13179     case X86::BI__builtin_ia32_gatherdiv8di:
13180       IID = Intrinsic::x86_avx512_mask_gather_qpq_512;
13181       break;
13182     case X86::BI__builtin_ia32_gatherdiv16si:
13183       IID = Intrinsic::x86_avx512_mask_gather_qpi_512;
13184       break;
13185     }
13186 
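    // The mask operand arrives as an integer; convert it to a vector of i1
    // whose width matches the narrower of the passthru and index vectors,
    // since that is how many elements the gather actually touches.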
13187     unsigned MinElts = std::min(
13188         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements(),
13189         cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements());
13190     Ops[3] = getMaskVecValue(*this, Ops[3], MinElts);
13191     Function *Intr = CGM.getIntrinsic(IID);
13192     return Builder.CreateCall(Intr, Ops);
13193   }
13194 
13195   case X86::BI__builtin_ia32_scattersiv8df:
13196   case X86::BI__builtin_ia32_scattersiv16sf:
13197   case X86::BI__builtin_ia32_scatterdiv8df:
13198   case X86::BI__builtin_ia32_scatterdiv16sf:
13199   case X86::BI__builtin_ia32_scattersiv8di:
13200   case X86::BI__builtin_ia32_scattersiv16si:
13201   case X86::BI__builtin_ia32_scatterdiv8di:
13202   case X86::BI__builtin_ia32_scatterdiv16si:
13203   case X86::BI__builtin_ia32_scatterdiv2df:
13204   case X86::BI__builtin_ia32_scatterdiv2di:
13205   case X86::BI__builtin_ia32_scatterdiv4df:
13206   case X86::BI__builtin_ia32_scatterdiv4di:
13207   case X86::BI__builtin_ia32_scatterdiv4sf:
13208   case X86::BI__builtin_ia32_scatterdiv4si:
13209   case X86::BI__builtin_ia32_scatterdiv8sf:
13210   case X86::BI__builtin_ia32_scatterdiv8si:
13211   case X86::BI__builtin_ia32_scattersiv2df:
13212   case X86::BI__builtin_ia32_scattersiv2di:
13213   case X86::BI__builtin_ia32_scattersiv4df:
13214   case X86::BI__builtin_ia32_scattersiv4di:
13215   case X86::BI__builtin_ia32_scattersiv4sf:
13216   case X86::BI__builtin_ia32_scattersiv4si:
13217   case X86::BI__builtin_ia32_scattersiv8sf:
13218   case X86::BI__builtin_ia32_scattersiv8si: {
13219     Intrinsic::ID IID;
13220     switch (BuiltinID) {
13221     default: llvm_unreachable("Unexpected builtin");
13222     case X86::BI__builtin_ia32_scattersiv8df:
13223       IID = Intrinsic::x86_avx512_mask_scatter_dpd_512;
13224       break;
13225     case X86::BI__builtin_ia32_scattersiv16sf:
13226       IID = Intrinsic::x86_avx512_mask_scatter_dps_512;
13227       break;
13228     case X86::BI__builtin_ia32_scatterdiv8df:
13229       IID = Intrinsic::x86_avx512_mask_scatter_qpd_512;
13230       break;
13231     case X86::BI__builtin_ia32_scatterdiv16sf:
13232       IID = Intrinsic::x86_avx512_mask_scatter_qps_512;
13233       break;
13234     case X86::BI__builtin_ia32_scattersiv8di:
13235       IID = Intrinsic::x86_avx512_mask_scatter_dpq_512;
13236       break;
13237     case X86::BI__builtin_ia32_scattersiv16si:
13238       IID = Intrinsic::x86_avx512_mask_scatter_dpi_512;
13239       break;
13240     case X86::BI__builtin_ia32_scatterdiv8di:
13241       IID = Intrinsic::x86_avx512_mask_scatter_qpq_512;
13242       break;
13243     case X86::BI__builtin_ia32_scatterdiv16si:
13244       IID = Intrinsic::x86_avx512_mask_scatter_qpi_512;
13245       break;
13246     case X86::BI__builtin_ia32_scatterdiv2df:
13247       IID = Intrinsic::x86_avx512_mask_scatterdiv2_df;
13248       break;
13249     case X86::BI__builtin_ia32_scatterdiv2di:
13250       IID = Intrinsic::x86_avx512_mask_scatterdiv2_di;
13251       break;
13252     case X86::BI__builtin_ia32_scatterdiv4df:
13253       IID = Intrinsic::x86_avx512_mask_scatterdiv4_df;
13254       break;
13255     case X86::BI__builtin_ia32_scatterdiv4di:
13256       IID = Intrinsic::x86_avx512_mask_scatterdiv4_di;
13257       break;
13258     case X86::BI__builtin_ia32_scatterdiv4sf:
13259       IID = Intrinsic::x86_avx512_mask_scatterdiv4_sf;
13260       break;
13261     case X86::BI__builtin_ia32_scatterdiv4si:
13262       IID = Intrinsic::x86_avx512_mask_scatterdiv4_si;
13263       break;
13264     case X86::BI__builtin_ia32_scatterdiv8sf:
13265       IID = Intrinsic::x86_avx512_mask_scatterdiv8_sf;
13266       break;
13267     case X86::BI__builtin_ia32_scatterdiv8si:
13268       IID = Intrinsic::x86_avx512_mask_scatterdiv8_si;
13269       break;
13270     case X86::BI__builtin_ia32_scattersiv2df:
13271       IID = Intrinsic::x86_avx512_mask_scattersiv2_df;
13272       break;
13273     case X86::BI__builtin_ia32_scattersiv2di:
13274       IID = Intrinsic::x86_avx512_mask_scattersiv2_di;
13275       break;
13276     case X86::BI__builtin_ia32_scattersiv4df:
13277       IID = Intrinsic::x86_avx512_mask_scattersiv4_df;
13278       break;
13279     case X86::BI__builtin_ia32_scattersiv4di:
13280       IID = Intrinsic::x86_avx512_mask_scattersiv4_di;
13281       break;
13282     case X86::BI__builtin_ia32_scattersiv4sf:
13283       IID = Intrinsic::x86_avx512_mask_scattersiv4_sf;
13284       break;
13285     case X86::BI__builtin_ia32_scattersiv4si:
13286       IID = Intrinsic::x86_avx512_mask_scattersiv4_si;
13287       break;
13288     case X86::BI__builtin_ia32_scattersiv8sf:
13289       IID = Intrinsic::x86_avx512_mask_scattersiv8_sf;
13290       break;
13291     case X86::BI__builtin_ia32_scattersiv8si:
13292       IID = Intrinsic::x86_avx512_mask_scattersiv8_si;
13293       break;
13294     }
13295 
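    // As with the gathers above, resize the integer mask to a vector of i1
    // matching the narrower of the index and data vectors.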
13296     unsigned MinElts = std::min(
13297         cast<llvm::FixedVectorType>(Ops[2]->getType())->getNumElements(),
13298         cast<llvm::FixedVectorType>(Ops[3]->getType())->getNumElements());
13299     Ops[1] = getMaskVecValue(*this, Ops[1], MinElts);
13300     Function *Intr = CGM.getIntrinsic(IID);
13301     return Builder.CreateCall(Intr, Ops);
13302   }
13303 
13304   case X86::BI__builtin_ia32_vextractf128_pd256:
13305   case X86::BI__builtin_ia32_vextractf128_ps256:
13306   case X86::BI__builtin_ia32_vextractf128_si256:
13307   case X86::BI__builtin_ia32_extract128i256:
13308   case X86::BI__builtin_ia32_extractf64x4_mask:
13309   case X86::BI__builtin_ia32_extractf32x4_mask:
13310   case X86::BI__builtin_ia32_extracti64x4_mask:
13311   case X86::BI__builtin_ia32_extracti32x4_mask:
13312   case X86::BI__builtin_ia32_extractf32x8_mask:
13313   case X86::BI__builtin_ia32_extracti32x8_mask:
13314   case X86::BI__builtin_ia32_extractf32x4_256_mask:
13315   case X86::BI__builtin_ia32_extracti32x4_256_mask:
13316   case X86::BI__builtin_ia32_extractf64x2_256_mask:
13317   case X86::BI__builtin_ia32_extracti64x2_256_mask:
13318   case X86::BI__builtin_ia32_extractf64x2_512_mask:
13319   case X86::BI__builtin_ia32_extracti64x2_512_mask: {
13320     auto *DstTy = cast<llvm::FixedVectorType>(ConvertType(E->getType()));
13321     unsigned NumElts = DstTy->getNumElements();
13322     unsigned SrcNumElts =
13323         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13324     unsigned SubVectors = SrcNumElts / NumElts;
13325     unsigned Index = cast<ConstantInt>(Ops[1])->getZExtValue();
13326     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
13327     Index &= SubVectors - 1; // Remove any extra bits.
13328     Index *= NumElts;
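    // E.g. extracting the subvector at Index == 2 from a <16 x float> source
    // into a <4 x float> result: SubVectors == 4, so the shuffle below reads
    // source elements 8..11.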
13329 
13330     int Indices[16];
13331     for (unsigned i = 0; i != NumElts; ++i)
13332       Indices[i] = i + Index;
13333 
13334     Value *Res = Builder.CreateShuffleVector(Ops[0],
13335                                              makeArrayRef(Indices, NumElts),
13336                                              "extract");
13337 
13338     if (Ops.size() == 4)
13339       Res = EmitX86Select(*this, Ops[3], Res, Ops[2]);
13340 
13341     return Res;
13342   }
13343   case X86::BI__builtin_ia32_vinsertf128_pd256:
13344   case X86::BI__builtin_ia32_vinsertf128_ps256:
13345   case X86::BI__builtin_ia32_vinsertf128_si256:
13346   case X86::BI__builtin_ia32_insert128i256:
13347   case X86::BI__builtin_ia32_insertf64x4:
13348   case X86::BI__builtin_ia32_insertf32x4:
13349   case X86::BI__builtin_ia32_inserti64x4:
13350   case X86::BI__builtin_ia32_inserti32x4:
13351   case X86::BI__builtin_ia32_insertf32x8:
13352   case X86::BI__builtin_ia32_inserti32x8:
13353   case X86::BI__builtin_ia32_insertf32x4_256:
13354   case X86::BI__builtin_ia32_inserti32x4_256:
13355   case X86::BI__builtin_ia32_insertf64x2_256:
13356   case X86::BI__builtin_ia32_inserti64x2_256:
13357   case X86::BI__builtin_ia32_insertf64x2_512:
13358   case X86::BI__builtin_ia32_inserti64x2_512: {
13359     unsigned DstNumElts =
13360         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13361     unsigned SrcNumElts =
13362         cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements();
13363     unsigned SubVectors = DstNumElts / SrcNumElts;
13364     unsigned Index = cast<ConstantInt>(Ops[2])->getZExtValue();
13365     assert(llvm::isPowerOf2_32(SubVectors) && "Expected power of 2 subvectors");
13366     Index &= SubVectors - 1; // Remove any extra bits.
13367     Index *= SrcNumElts;
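    // E.g. inserting a <4 x float> into the upper half of an <8 x float>
    // (Index == 1): first widen the subvector to the destination width, then
    // blend it in so result elements 4..7 come from the widened source.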
13368 
13369     int Indices[16];
13370     for (unsigned i = 0; i != DstNumElts; ++i)
13371       Indices[i] = (i >= SrcNumElts) ? SrcNumElts + (i % SrcNumElts) : i;
13372 
13373     Value *Op1 = Builder.CreateShuffleVector(Ops[1],
13374                                              makeArrayRef(Indices, DstNumElts),
13375                                              "widen");
13376 
13377     for (unsigned i = 0; i != DstNumElts; ++i) {
13378       if (i >= Index && i < (Index + SrcNumElts))
13379         Indices[i] = (i - Index) + DstNumElts;
13380       else
13381         Indices[i] = i;
13382     }
13383 
13384     return Builder.CreateShuffleVector(Ops[0], Op1,
13385                                        makeArrayRef(Indices, DstNumElts),
13386                                        "insert");
13387   }
13388   case X86::BI__builtin_ia32_pmovqd512_mask:
13389   case X86::BI__builtin_ia32_pmovwb512_mask: {
13390     Value *Res = Builder.CreateTrunc(Ops[0], Ops[1]->getType());
13391     return EmitX86Select(*this, Ops[2], Res, Ops[1]);
13392   }
13393   case X86::BI__builtin_ia32_pmovdb512_mask:
13394   case X86::BI__builtin_ia32_pmovdw512_mask:
13395   case X86::BI__builtin_ia32_pmovqw512_mask: {
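    // If the mask is all ones, a plain IR truncation is enough; otherwise
    // fall back to the masked intrinsic so inactive elements are taken from
    // the passthru operand.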
13396     if (const auto *C = dyn_cast<Constant>(Ops[2]))
13397       if (C->isAllOnesValue())
13398         return Builder.CreateTrunc(Ops[0], Ops[1]->getType());
13399 
13400     Intrinsic::ID IID;
13401     switch (BuiltinID) {
13402     default: llvm_unreachable("Unsupported intrinsic!");
13403     case X86::BI__builtin_ia32_pmovdb512_mask:
13404       IID = Intrinsic::x86_avx512_mask_pmov_db_512;
13405       break;
13406     case X86::BI__builtin_ia32_pmovdw512_mask:
13407       IID = Intrinsic::x86_avx512_mask_pmov_dw_512;
13408       break;
13409     case X86::BI__builtin_ia32_pmovqw512_mask:
13410       IID = Intrinsic::x86_avx512_mask_pmov_qw_512;
13411       break;
13412     }
13413 
13414     Function *Intr = CGM.getIntrinsic(IID);
13415     return Builder.CreateCall(Intr, Ops);
13416   }
13417   case X86::BI__builtin_ia32_pblendw128:
13418   case X86::BI__builtin_ia32_blendpd:
13419   case X86::BI__builtin_ia32_blendps:
13420   case X86::BI__builtin_ia32_blendpd256:
13421   case X86::BI__builtin_ia32_blendps256:
13422   case X86::BI__builtin_ia32_pblendw256:
13423   case X86::BI__builtin_ia32_pblendd128:
13424   case X86::BI__builtin_ia32_pblendd256: {
13425     unsigned NumElts =
13426         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13427     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13428 
13429     int Indices[16];
    // If there are more than 8 elements, the 8-bit immediate is reused for
    // each group of 8 elements, so index its bits with (i % 8).
13432     for (unsigned i = 0; i != NumElts; ++i)
13433       Indices[i] = ((Imm >> (i % 8)) & 0x1) ? NumElts + i : i;
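    // A set immediate bit selects the element from Ops[1] (shuffle index
    // NumElts + i); a clear bit keeps the element from Ops[0]. E.g. with four
    // elements and Imm == 0x5 the result is
    // { Ops[1][0], Ops[0][1], Ops[1][2], Ops[0][3] }.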
13434 
13435     return Builder.CreateShuffleVector(Ops[0], Ops[1],
13436                                        makeArrayRef(Indices, NumElts),
13437                                        "blend");
13438   }
13439   case X86::BI__builtin_ia32_pshuflw:
13440   case X86::BI__builtin_ia32_pshuflw256:
13441   case X86::BI__builtin_ia32_pshuflw512: {
13442     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13443     auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13444     unsigned NumElts = Ty->getNumElements();
13445 
    // Splat the low 8 bits of the immediate 4 times so the loop below can
    // keep consuming immediate bits as it wraps across 128-bit lanes.
13447     Imm = (Imm & 0xff) * 0x01010101;
13448 
13449     int Indices[32];
13450     for (unsigned l = 0; l != NumElts; l += 8) {
13451       for (unsigned i = 0; i != 4; ++i) {
13452         Indices[l + i] = l + (Imm & 3);
13453         Imm >>= 2;
13454       }
13455       for (unsigned i = 4; i != 8; ++i)
13456         Indices[l + i] = l + i;
13457     }
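    // E.g. Imm == 0x1B reverses the low four words of each 128-bit lane
    // (indices l+3, l+2, l+1, l+0) while the high four words pass through.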
13458 
13459     return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13460                                        "pshuflw");
13461   }
13462   case X86::BI__builtin_ia32_pshufhw:
13463   case X86::BI__builtin_ia32_pshufhw256:
13464   case X86::BI__builtin_ia32_pshufhw512: {
13465     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13466     auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13467     unsigned NumElts = Ty->getNumElements();
13468 
    // Splat the low 8 bits of the immediate 4 times so the loop below can
    // keep consuming immediate bits as it wraps across 128-bit lanes.
13470     Imm = (Imm & 0xff) * 0x01010101;
13471 
13472     int Indices[32];
13473     for (unsigned l = 0; l != NumElts; l += 8) {
13474       for (unsigned i = 0; i != 4; ++i)
13475         Indices[l + i] = l + i;
13476       for (unsigned i = 4; i != 8; ++i) {
13477         Indices[l + i] = l + 4 + (Imm & 3);
13478         Imm >>= 2;
13479       }
13480     }
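    // E.g. Imm == 0x1B reverses the high four words of each 128-bit lane
    // while the low four words pass through.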
13481 
13482     return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13483                                        "pshufhw");
13484   }
13485   case X86::BI__builtin_ia32_pshufd:
13486   case X86::BI__builtin_ia32_pshufd256:
13487   case X86::BI__builtin_ia32_pshufd512:
13488   case X86::BI__builtin_ia32_vpermilpd:
13489   case X86::BI__builtin_ia32_vpermilps:
13490   case X86::BI__builtin_ia32_vpermilpd256:
13491   case X86::BI__builtin_ia32_vpermilps256:
13492   case X86::BI__builtin_ia32_vpermilpd512:
13493   case X86::BI__builtin_ia32_vpermilps512: {
13494     uint32_t Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13495     auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13496     unsigned NumElts = Ty->getNumElements();
13497     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
13498     unsigned NumLaneElts = NumElts / NumLanes;
13499 
    // Splat the low 8 bits of the immediate 4 times so the loop below can
    // keep consuming immediate bits as it wraps across 128-bit lanes.
13501     Imm = (Imm & 0xff) * 0x01010101;
13502 
13503     int Indices[16];
13504     for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13505       for (unsigned i = 0; i != NumLaneElts; ++i) {
13506         Indices[i + l] = (Imm % NumLaneElts) + l;
13507         Imm /= NumLaneElts;
13508       }
13509     }
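    // E.g. pshufd with Imm == 0x1B reverses the four dwords of each 128-bit
    // lane (indices l+3, l+2, l+1, l+0).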
13510 
13511     return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13512                                        "permil");
13513   }
13514   case X86::BI__builtin_ia32_shufpd:
13515   case X86::BI__builtin_ia32_shufpd256:
13516   case X86::BI__builtin_ia32_shufpd512:
13517   case X86::BI__builtin_ia32_shufps:
13518   case X86::BI__builtin_ia32_shufps256:
13519   case X86::BI__builtin_ia32_shufps512: {
13520     uint32_t Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13521     auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13522     unsigned NumElts = Ty->getNumElements();
13523     unsigned NumLanes = Ty->getPrimitiveSizeInBits() / 128;
13524     unsigned NumLaneElts = NumElts / NumLanes;
13525 
    // Splat the low 8 bits of the immediate 4 times so the loop below can
    // keep consuming immediate bits as it wraps across 128-bit lanes.
13527     Imm = (Imm & 0xff) * 0x01010101;
13528 
13529     int Indices[16];
13530     for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13531       for (unsigned i = 0; i != NumLaneElts; ++i) {
13532         unsigned Index = Imm % NumLaneElts;
13533         Imm /= NumLaneElts;
13534         if (i >= (NumLaneElts / 2))
13535           Index += NumElts;
13536         Indices[l + i] = l + Index;
13537       }
13538     }
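    // E.g. for 128-bit shufps the two low result elements are selected from
    // Ops[0] and the two high result elements from Ops[1], each by a
    // successive 2-bit field of the immediate.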
13539 
13540     return Builder.CreateShuffleVector(Ops[0], Ops[1],
13541                                        makeArrayRef(Indices, NumElts),
13542                                        "shufp");
13543   }
13544   case X86::BI__builtin_ia32_permdi256:
13545   case X86::BI__builtin_ia32_permdf256:
13546   case X86::BI__builtin_ia32_permdi512:
13547   case X86::BI__builtin_ia32_permdf512: {
13548     unsigned Imm = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
13549     auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13550     unsigned NumElts = Ty->getNumElements();
13551 
13552     // These intrinsics operate on 256-bit lanes of four 64-bit elements.
13553     int Indices[8];
13554     for (unsigned l = 0; l != NumElts; l += 4)
13555       for (unsigned i = 0; i != 4; ++i)
13556         Indices[l + i] = l + ((Imm >> (2 * i)) & 0x3);
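    // E.g. Imm == 0x4E (element order 2, 3, 0, 1) swaps the two 128-bit
    // halves of each 256-bit lane.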
13557 
13558     return Builder.CreateShuffleVector(Ops[0], makeArrayRef(Indices, NumElts),
13559                                        "perm");
13560   }
13561   case X86::BI__builtin_ia32_palignr128:
13562   case X86::BI__builtin_ia32_palignr256:
13563   case X86::BI__builtin_ia32_palignr512: {
13564     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
13565 
13566     unsigned NumElts =
13567         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13568     assert(NumElts % 16 == 0);
13569 
13570     // If palignr is shifting the pair of vectors more than the size of two
13571     // lanes, emit zero.
13572     if (ShiftVal >= 32)
13573       return llvm::Constant::getNullValue(ConvertType(E->getType()));
13574 
13575     // If palignr is shifting the pair of input vectors more than one lane,
13576     // but less than two lanes, convert to shifting in zeroes.
13577     if (ShiftVal > 16) {
13578       ShiftVal -= 16;
13579       Ops[1] = Ops[0];
13580       Ops[0] = llvm::Constant::getNullValue(Ops[0]->getType());
13581     }
13582 
13583     int Indices[64];
    // 256/512-bit palignr operates on 128-bit lanes, so handle each lane
    // separately.
13585     for (unsigned l = 0; l != NumElts; l += 16) {
13586       for (unsigned i = 0; i != 16; ++i) {
13587         unsigned Idx = ShiftVal + i;
13588         if (Idx >= 16)
13589           Idx += NumElts - 16; // End of lane, switch operand.
13590         Indices[l + i] = Idx + l;
13591       }
13592     }
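    // E.g. for 128-bit vectors with ShiftVal == 4 the shuffle below yields
    // bytes 4..15 of Ops[1] followed by bytes 0..3 of Ops[0], i.e. the
    // Ops[0]:Ops[1] concatenation shifted right by 4 bytes.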
13593 
13594     return Builder.CreateShuffleVector(Ops[1], Ops[0],
13595                                        makeArrayRef(Indices, NumElts),
13596                                        "palignr");
13597   }
13598   case X86::BI__builtin_ia32_alignd128:
13599   case X86::BI__builtin_ia32_alignd256:
13600   case X86::BI__builtin_ia32_alignd512:
13601   case X86::BI__builtin_ia32_alignq128:
13602   case X86::BI__builtin_ia32_alignq256:
13603   case X86::BI__builtin_ia32_alignq512: {
13604     unsigned NumElts =
13605         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13606     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
13607 
    // Mask the shift amount to the width of the vector.
13609     ShiftVal &= NumElts - 1;
13610 
13611     int Indices[16];
13612     for (unsigned i = 0; i != NumElts; ++i)
13613       Indices[i] = i + ShiftVal;
13614 
13615     return Builder.CreateShuffleVector(Ops[1], Ops[0],
13616                                        makeArrayRef(Indices, NumElts),
13617                                        "valign");
13618   }
13619   case X86::BI__builtin_ia32_shuf_f32x4_256:
13620   case X86::BI__builtin_ia32_shuf_f64x2_256:
13621   case X86::BI__builtin_ia32_shuf_i32x4_256:
13622   case X86::BI__builtin_ia32_shuf_i64x2_256:
13623   case X86::BI__builtin_ia32_shuf_f32x4:
13624   case X86::BI__builtin_ia32_shuf_f64x2:
13625   case X86::BI__builtin_ia32_shuf_i32x4:
13626   case X86::BI__builtin_ia32_shuf_i64x2: {
13627     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13628     auto *Ty = cast<llvm::FixedVectorType>(Ops[0]->getType());
13629     unsigned NumElts = Ty->getNumElements();
13630     unsigned NumLanes = Ty->getPrimitiveSizeInBits() == 512 ? 4 : 2;
13631     unsigned NumLaneElts = NumElts / NumLanes;
13632 
13633     int Indices[16];
13634     for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
13635       unsigned Index = (Imm % NumLanes) * NumLaneElts;
13636       Imm /= NumLanes; // Discard the bits we just used.
13637       if (l >= (NumElts / 2))
13638         Index += NumElts; // Switch to other source.
13639       for (unsigned i = 0; i != NumLaneElts; ++i) {
13640         Indices[l + i] = Index + i;
13641       }
13642     }
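    // The low half of the result draws its 128-bit blocks from Ops[0] and the
    // high half from Ops[1], each block selected by a successive immediate
    // field of log2(NumLanes) bits.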
13643 
13644     return Builder.CreateShuffleVector(Ops[0], Ops[1],
13645                                        makeArrayRef(Indices, NumElts),
13646                                        "shuf");
13647   }
13648 
13649   case X86::BI__builtin_ia32_vperm2f128_pd256:
13650   case X86::BI__builtin_ia32_vperm2f128_ps256:
13651   case X86::BI__builtin_ia32_vperm2f128_si256:
13652   case X86::BI__builtin_ia32_permti256: {
13653     unsigned Imm = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
13654     unsigned NumElts =
13655         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
13656 
    // This takes a very simple approach: there are two result lanes and a
    // shuffle can have two inputs, so we reserve the first input for the
    // first lane and the second input for the second lane. This may result
    // in duplicate sources, but the backend can deal with that.
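    // E.g. Imm == 0x21 produces the high half of Ops[0] in the low result
    // lane and the low half of Ops[1] in the high result lane.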
13661 
13662     Value *OutOps[2];
13663     int Indices[8];
13664     for (unsigned l = 0; l != 2; ++l) {
13665       // Determine the source for this lane.
13666       if (Imm & (1 << ((l * 4) + 3)))
13667         OutOps[l] = llvm::ConstantAggregateZero::get(Ops[0]->getType());
13668       else if (Imm & (1 << ((l * 4) + 1)))
13669         OutOps[l] = Ops[1];
13670       else
13671         OutOps[l] = Ops[0];
13672 
13673       for (unsigned i = 0; i != NumElts/2; ++i) {
13674         // Start with ith element of the source for this lane.
13675         unsigned Idx = (l * NumElts) + i;
13676         // If bit 0 of the immediate half is set, switch to the high half of
13677         // the source.
13678         if (Imm & (1 << (l * 4)))
13679           Idx += NumElts/2;
13680         Indices[(l * (NumElts/2)) + i] = Idx;
13681       }
13682     }
13683 
13684     return Builder.CreateShuffleVector(OutOps[0], OutOps[1],
13685                                        makeArrayRef(Indices, NumElts),
13686                                        "vperm");
13687   }
13688 
13689   case X86::BI__builtin_ia32_pslldqi128_byteshift:
13690   case X86::BI__builtin_ia32_pslldqi256_byteshift:
13691   case X86::BI__builtin_ia32_pslldqi512_byteshift: {
13692     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13693     auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
13694     // Builtin type is vXi64 so multiply by 8 to get bytes.
13695     unsigned NumElts = ResultType->getNumElements() * 8;
13696 
13697     // If pslldq is shifting the vector more than 15 bytes, emit zero.
13698     if (ShiftVal >= 16)
13699       return llvm::Constant::getNullValue(ResultType);
13700 
13701     int Indices[64];
    // 256/512-bit pslldq operates on 128-bit lanes, so handle each lane
    // separately.
13703     for (unsigned l = 0; l != NumElts; l += 16) {
13704       for (unsigned i = 0; i != 16; ++i) {
13705         unsigned Idx = NumElts + i - ShiftVal;
13706         if (Idx < NumElts) Idx -= NumElts - 16; // end of lane, switch operand.
13707         Indices[l + i] = Idx + l;
13708       }
13709     }
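    // E.g. with ShiftVal == 3 each 128-bit lane becomes three zero bytes
    // followed by bytes 0..12 of the source lane, i.e. a byte-wise left
    // shift.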
13710 
13711     auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13712     Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13713     Value *Zero = llvm::Constant::getNullValue(VecTy);
13714     Value *SV = Builder.CreateShuffleVector(Zero, Cast,
13715                                             makeArrayRef(Indices, NumElts),
13716                                             "pslldq");
13717     return Builder.CreateBitCast(SV, Ops[0]->getType(), "cast");
13718   }
13719   case X86::BI__builtin_ia32_psrldqi128_byteshift:
13720   case X86::BI__builtin_ia32_psrldqi256_byteshift:
13721   case X86::BI__builtin_ia32_psrldqi512_byteshift: {
13722     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13723     auto *ResultType = cast<llvm::FixedVectorType>(Ops[0]->getType());
13724     // Builtin type is vXi64 so multiply by 8 to get bytes.
13725     unsigned NumElts = ResultType->getNumElements() * 8;
13726 
13727     // If psrldq is shifting the vector more than 15 bytes, emit zero.
13728     if (ShiftVal >= 16)
13729       return llvm::Constant::getNullValue(ResultType);
13730 
13731     int Indices[64];
    // 256/512-bit psrldq operates on 128-bit lanes, so handle each lane
    // separately.
13733     for (unsigned l = 0; l != NumElts; l += 16) {
13734       for (unsigned i = 0; i != 16; ++i) {
13735         unsigned Idx = i + ShiftVal;
13736         if (Idx >= 16) Idx += NumElts - 16; // end of lane, switch operand.
13737         Indices[l + i] = Idx + l;
13738       }
13739     }
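    // E.g. with ShiftVal == 3 each 128-bit lane becomes source bytes 3..15
    // followed by three zero bytes, i.e. a byte-wise right shift.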
13740 
13741     auto *VecTy = llvm::FixedVectorType::get(Int8Ty, NumElts);
13742     Value *Cast = Builder.CreateBitCast(Ops[0], VecTy, "cast");
13743     Value *Zero = llvm::Constant::getNullValue(VecTy);
13744     Value *SV = Builder.CreateShuffleVector(Cast, Zero,
13745                                             makeArrayRef(Indices, NumElts),
13746                                             "psrldq");
13747     return Builder.CreateBitCast(SV, ResultType, "cast");
13748   }
13749   case X86::BI__builtin_ia32_kshiftliqi:
13750   case X86::BI__builtin_ia32_kshiftlihi:
13751   case X86::BI__builtin_ia32_kshiftlisi:
13752   case X86::BI__builtin_ia32_kshiftlidi: {
13753     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13754     unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13755 
13756     if (ShiftVal >= NumElts)
13757       return llvm::Constant::getNullValue(Ops[0]->getType());
13758 
13759     Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13760 
13761     int Indices[64];
13762     for (unsigned i = 0; i != NumElts; ++i)
13763       Indices[i] = NumElts + i - ShiftVal;
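    // Shuffling zeroes in from the bottom: result bit i is input bit
    // i - ShiftVal, and the low ShiftVal bits become zero.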
13764 
13765     Value *Zero = llvm::Constant::getNullValue(In->getType());
13766     Value *SV = Builder.CreateShuffleVector(Zero, In,
13767                                             makeArrayRef(Indices, NumElts),
13768                                             "kshiftl");
13769     return Builder.CreateBitCast(SV, Ops[0]->getType());
13770   }
13771   case X86::BI__builtin_ia32_kshiftriqi:
13772   case X86::BI__builtin_ia32_kshiftrihi:
13773   case X86::BI__builtin_ia32_kshiftrisi:
13774   case X86::BI__builtin_ia32_kshiftridi: {
13775     unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[1])->getZExtValue() & 0xff;
13776     unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13777 
13778     if (ShiftVal >= NumElts)
13779       return llvm::Constant::getNullValue(Ops[0]->getType());
13780 
13781     Value *In = getMaskVecValue(*this, Ops[0], NumElts);
13782 
13783     int Indices[64];
13784     for (unsigned i = 0; i != NumElts; ++i)
13785       Indices[i] = i + ShiftVal;
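    // Shuffling zeroes in from the top: result bit i is input bit
    // i + ShiftVal, and the high ShiftVal bits become zero.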
13786 
13787     Value *Zero = llvm::Constant::getNullValue(In->getType());
13788     Value *SV = Builder.CreateShuffleVector(In, Zero,
13789                                             makeArrayRef(Indices, NumElts),
13790                                             "kshiftr");
13791     return Builder.CreateBitCast(SV, Ops[0]->getType());
13792   }
13793   case X86::BI__builtin_ia32_movnti:
13794   case X86::BI__builtin_ia32_movnti64:
13795   case X86::BI__builtin_ia32_movntsd:
13796   case X86::BI__builtin_ia32_movntss: {
13797     llvm::MDNode *Node = llvm::MDNode::get(
13798         getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
13799 
13800     Value *Ptr = Ops[0];
13801     Value *Src = Ops[1];
13802 
    // Extract the 0th element of the source vector.
13804     if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
13805         BuiltinID == X86::BI__builtin_ia32_movntss)
13806       Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
13807 
13808     // Convert the type of the pointer to a pointer to the stored type.
13809     Value *BC = Builder.CreateBitCast(
13810         Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
13811 
13812     // Unaligned nontemporal store of the scalar value.
13813     StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
13814     SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
13815     SI->setAlignment(llvm::Align(1));
13816     return SI;
13817   }
  // Rotate is a special case of funnel shift: the first two arguments are
  // the same.
13819   case X86::BI__builtin_ia32_vprotb:
13820   case X86::BI__builtin_ia32_vprotw:
13821   case X86::BI__builtin_ia32_vprotd:
13822   case X86::BI__builtin_ia32_vprotq:
13823   case X86::BI__builtin_ia32_vprotbi:
13824   case X86::BI__builtin_ia32_vprotwi:
13825   case X86::BI__builtin_ia32_vprotdi:
13826   case X86::BI__builtin_ia32_vprotqi:
13827   case X86::BI__builtin_ia32_prold128:
13828   case X86::BI__builtin_ia32_prold256:
13829   case X86::BI__builtin_ia32_prold512:
13830   case X86::BI__builtin_ia32_prolq128:
13831   case X86::BI__builtin_ia32_prolq256:
13832   case X86::BI__builtin_ia32_prolq512:
13833   case X86::BI__builtin_ia32_prolvd128:
13834   case X86::BI__builtin_ia32_prolvd256:
13835   case X86::BI__builtin_ia32_prolvd512:
13836   case X86::BI__builtin_ia32_prolvq128:
13837   case X86::BI__builtin_ia32_prolvq256:
13838   case X86::BI__builtin_ia32_prolvq512:
13839     return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], false);
13840   case X86::BI__builtin_ia32_prord128:
13841   case X86::BI__builtin_ia32_prord256:
13842   case X86::BI__builtin_ia32_prord512:
13843   case X86::BI__builtin_ia32_prorq128:
13844   case X86::BI__builtin_ia32_prorq256:
13845   case X86::BI__builtin_ia32_prorq512:
13846   case X86::BI__builtin_ia32_prorvd128:
13847   case X86::BI__builtin_ia32_prorvd256:
13848   case X86::BI__builtin_ia32_prorvd512:
13849   case X86::BI__builtin_ia32_prorvq128:
13850   case X86::BI__builtin_ia32_prorvq256:
13851   case X86::BI__builtin_ia32_prorvq512:
13852     return EmitX86FunnelShift(*this, Ops[0], Ops[0], Ops[1], true);
13853   case X86::BI__builtin_ia32_selectb_128:
13854   case X86::BI__builtin_ia32_selectb_256:
13855   case X86::BI__builtin_ia32_selectb_512:
13856   case X86::BI__builtin_ia32_selectw_128:
13857   case X86::BI__builtin_ia32_selectw_256:
13858   case X86::BI__builtin_ia32_selectw_512:
13859   case X86::BI__builtin_ia32_selectd_128:
13860   case X86::BI__builtin_ia32_selectd_256:
13861   case X86::BI__builtin_ia32_selectd_512:
13862   case X86::BI__builtin_ia32_selectq_128:
13863   case X86::BI__builtin_ia32_selectq_256:
13864   case X86::BI__builtin_ia32_selectq_512:
13865   case X86::BI__builtin_ia32_selectph_128:
13866   case X86::BI__builtin_ia32_selectph_256:
13867   case X86::BI__builtin_ia32_selectph_512:
13868   case X86::BI__builtin_ia32_selectps_128:
13869   case X86::BI__builtin_ia32_selectps_256:
13870   case X86::BI__builtin_ia32_selectps_512:
13871   case X86::BI__builtin_ia32_selectpd_128:
13872   case X86::BI__builtin_ia32_selectpd_256:
13873   case X86::BI__builtin_ia32_selectpd_512:
13874     return EmitX86Select(*this, Ops[0], Ops[1], Ops[2]);
13875   case X86::BI__builtin_ia32_selectsh_128:
13876   case X86::BI__builtin_ia32_selectss_128:
13877   case X86::BI__builtin_ia32_selectsd_128: {
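    // Scalar select: only element 0 participates; the upper elements of the
    // result always come from Ops[1].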
13878     Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
13879     Value *B = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
13880     A = EmitX86ScalarSelect(*this, Ops[0], A, B);
13881     return Builder.CreateInsertElement(Ops[1], A, (uint64_t)0);
13882   }
13883   case X86::BI__builtin_ia32_cmpb128_mask:
13884   case X86::BI__builtin_ia32_cmpb256_mask:
13885   case X86::BI__builtin_ia32_cmpb512_mask:
13886   case X86::BI__builtin_ia32_cmpw128_mask:
13887   case X86::BI__builtin_ia32_cmpw256_mask:
13888   case X86::BI__builtin_ia32_cmpw512_mask:
13889   case X86::BI__builtin_ia32_cmpd128_mask:
13890   case X86::BI__builtin_ia32_cmpd256_mask:
13891   case X86::BI__builtin_ia32_cmpd512_mask:
13892   case X86::BI__builtin_ia32_cmpq128_mask:
13893   case X86::BI__builtin_ia32_cmpq256_mask:
13894   case X86::BI__builtin_ia32_cmpq512_mask: {
13895     unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13896     return EmitX86MaskedCompare(*this, CC, true, Ops);
13897   }
13898   case X86::BI__builtin_ia32_ucmpb128_mask:
13899   case X86::BI__builtin_ia32_ucmpb256_mask:
13900   case X86::BI__builtin_ia32_ucmpb512_mask:
13901   case X86::BI__builtin_ia32_ucmpw128_mask:
13902   case X86::BI__builtin_ia32_ucmpw256_mask:
13903   case X86::BI__builtin_ia32_ucmpw512_mask:
13904   case X86::BI__builtin_ia32_ucmpd128_mask:
13905   case X86::BI__builtin_ia32_ucmpd256_mask:
13906   case X86::BI__builtin_ia32_ucmpd512_mask:
13907   case X86::BI__builtin_ia32_ucmpq128_mask:
13908   case X86::BI__builtin_ia32_ucmpq256_mask:
13909   case X86::BI__builtin_ia32_ucmpq512_mask: {
13910     unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x7;
13911     return EmitX86MaskedCompare(*this, CC, false, Ops);
13912   }
13913   case X86::BI__builtin_ia32_vpcomb:
13914   case X86::BI__builtin_ia32_vpcomw:
13915   case X86::BI__builtin_ia32_vpcomd:
13916   case X86::BI__builtin_ia32_vpcomq:
13917     return EmitX86vpcom(*this, Ops, true);
13918   case X86::BI__builtin_ia32_vpcomub:
13919   case X86::BI__builtin_ia32_vpcomuw:
13920   case X86::BI__builtin_ia32_vpcomud:
13921   case X86::BI__builtin_ia32_vpcomuq:
13922     return EmitX86vpcom(*this, Ops, false);
13923 
13924   case X86::BI__builtin_ia32_kortestcqi:
13925   case X86::BI__builtin_ia32_kortestchi:
13926   case X86::BI__builtin_ia32_kortestcsi:
13927   case X86::BI__builtin_ia32_kortestcdi: {
13928     Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13929     Value *C = llvm::Constant::getAllOnesValue(Ops[0]->getType());
13930     Value *Cmp = Builder.CreateICmpEQ(Or, C);
13931     return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13932   }
13933   case X86::BI__builtin_ia32_kortestzqi:
13934   case X86::BI__builtin_ia32_kortestzhi:
13935   case X86::BI__builtin_ia32_kortestzsi:
13936   case X86::BI__builtin_ia32_kortestzdi: {
13937     Value *Or = EmitX86MaskLogic(*this, Instruction::Or, Ops);
13938     Value *C = llvm::Constant::getNullValue(Ops[0]->getType());
13939     Value *Cmp = Builder.CreateICmpEQ(Or, C);
13940     return Builder.CreateZExt(Cmp, ConvertType(E->getType()));
13941   }
13942 
13943   case X86::BI__builtin_ia32_ktestcqi:
13944   case X86::BI__builtin_ia32_ktestzqi:
13945   case X86::BI__builtin_ia32_ktestchi:
13946   case X86::BI__builtin_ia32_ktestzhi:
13947   case X86::BI__builtin_ia32_ktestcsi:
13948   case X86::BI__builtin_ia32_ktestzsi:
13949   case X86::BI__builtin_ia32_ktestcdi:
13950   case X86::BI__builtin_ia32_ktestzdi: {
13951     Intrinsic::ID IID;
13952     switch (BuiltinID) {
13953     default: llvm_unreachable("Unsupported intrinsic!");
13954     case X86::BI__builtin_ia32_ktestcqi:
13955       IID = Intrinsic::x86_avx512_ktestc_b;
13956       break;
13957     case X86::BI__builtin_ia32_ktestzqi:
13958       IID = Intrinsic::x86_avx512_ktestz_b;
13959       break;
13960     case X86::BI__builtin_ia32_ktestchi:
13961       IID = Intrinsic::x86_avx512_ktestc_w;
13962       break;
13963     case X86::BI__builtin_ia32_ktestzhi:
13964       IID = Intrinsic::x86_avx512_ktestz_w;
13965       break;
13966     case X86::BI__builtin_ia32_ktestcsi:
13967       IID = Intrinsic::x86_avx512_ktestc_d;
13968       break;
13969     case X86::BI__builtin_ia32_ktestzsi:
13970       IID = Intrinsic::x86_avx512_ktestz_d;
13971       break;
13972     case X86::BI__builtin_ia32_ktestcdi:
13973       IID = Intrinsic::x86_avx512_ktestc_q;
13974       break;
13975     case X86::BI__builtin_ia32_ktestzdi:
13976       IID = Intrinsic::x86_avx512_ktestz_q;
13977       break;
13978     }
13979 
13980     unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
13981     Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
13982     Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
13983     Function *Intr = CGM.getIntrinsic(IID);
13984     return Builder.CreateCall(Intr, {LHS, RHS});
13985   }
13986 
13987   case X86::BI__builtin_ia32_kaddqi:
13988   case X86::BI__builtin_ia32_kaddhi:
13989   case X86::BI__builtin_ia32_kaddsi:
13990   case X86::BI__builtin_ia32_kadddi: {
13991     Intrinsic::ID IID;
13992     switch (BuiltinID) {
13993     default: llvm_unreachable("Unsupported intrinsic!");
13994     case X86::BI__builtin_ia32_kaddqi:
13995       IID = Intrinsic::x86_avx512_kadd_b;
13996       break;
13997     case X86::BI__builtin_ia32_kaddhi:
13998       IID = Intrinsic::x86_avx512_kadd_w;
13999       break;
14000     case X86::BI__builtin_ia32_kaddsi:
14001       IID = Intrinsic::x86_avx512_kadd_d;
14002       break;
14003     case X86::BI__builtin_ia32_kadddi:
14004       IID = Intrinsic::x86_avx512_kadd_q;
14005       break;
14006     }
14007 
14008     unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14009     Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
14010     Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
14011     Function *Intr = CGM.getIntrinsic(IID);
14012     Value *Res = Builder.CreateCall(Intr, {LHS, RHS});
14013     return Builder.CreateBitCast(Res, Ops[0]->getType());
14014   }
14015   case X86::BI__builtin_ia32_kandqi:
14016   case X86::BI__builtin_ia32_kandhi:
14017   case X86::BI__builtin_ia32_kandsi:
14018   case X86::BI__builtin_ia32_kanddi:
14019     return EmitX86MaskLogic(*this, Instruction::And, Ops);
14020   case X86::BI__builtin_ia32_kandnqi:
14021   case X86::BI__builtin_ia32_kandnhi:
14022   case X86::BI__builtin_ia32_kandnsi:
14023   case X86::BI__builtin_ia32_kandndi:
14024     return EmitX86MaskLogic(*this, Instruction::And, Ops, true);
14025   case X86::BI__builtin_ia32_korqi:
14026   case X86::BI__builtin_ia32_korhi:
14027   case X86::BI__builtin_ia32_korsi:
14028   case X86::BI__builtin_ia32_kordi:
14029     return EmitX86MaskLogic(*this, Instruction::Or, Ops);
14030   case X86::BI__builtin_ia32_kxnorqi:
14031   case X86::BI__builtin_ia32_kxnorhi:
14032   case X86::BI__builtin_ia32_kxnorsi:
14033   case X86::BI__builtin_ia32_kxnordi:
14034     return EmitX86MaskLogic(*this, Instruction::Xor, Ops, true);
14035   case X86::BI__builtin_ia32_kxorqi:
14036   case X86::BI__builtin_ia32_kxorhi:
14037   case X86::BI__builtin_ia32_kxorsi:
14038   case X86::BI__builtin_ia32_kxordi:
    return EmitX86MaskLogic(*this, Instruction::Xor, Ops);
14040   case X86::BI__builtin_ia32_knotqi:
14041   case X86::BI__builtin_ia32_knothi:
14042   case X86::BI__builtin_ia32_knotsi:
14043   case X86::BI__builtin_ia32_knotdi: {
14044     unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14045     Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
14046     return Builder.CreateBitCast(Builder.CreateNot(Res),
14047                                  Ops[0]->getType());
14048   }
14049   case X86::BI__builtin_ia32_kmovb:
14050   case X86::BI__builtin_ia32_kmovw:
14051   case X86::BI__builtin_ia32_kmovd:
14052   case X86::BI__builtin_ia32_kmovq: {
14053     // Bitcast to vXi1 type and then back to integer. This gets the mask
14054     // register type into the IR, but might be optimized out depending on
14055     // what's around it.
14056     unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14057     Value *Res = getMaskVecValue(*this, Ops[0], NumElts);
14058     return Builder.CreateBitCast(Res, Ops[0]->getType());
14059   }
14060 
14061   case X86::BI__builtin_ia32_kunpckdi:
14062   case X86::BI__builtin_ia32_kunpcksi:
14063   case X86::BI__builtin_ia32_kunpckhi: {
14064     unsigned NumElts = Ops[0]->getType()->getIntegerBitWidth();
14065     Value *LHS = getMaskVecValue(*this, Ops[0], NumElts);
14066     Value *RHS = getMaskVecValue(*this, Ops[1], NumElts);
14067     int Indices[64];
14068     for (unsigned i = 0; i != NumElts; ++i)
14069       Indices[i] = i;
14070 
14071     // First extract half of each vector. This gives better codegen than
14072     // doing it in a single shuffle.
14073     LHS = Builder.CreateShuffleVector(LHS, LHS,
14074                                       makeArrayRef(Indices, NumElts / 2));
14075     RHS = Builder.CreateShuffleVector(RHS, RHS,
14076                                       makeArrayRef(Indices, NumElts / 2));
14077     // Concat the vectors.
14078     // NOTE: Operands are swapped to match the intrinsic definition.
14079     Value *Res = Builder.CreateShuffleVector(RHS, LHS,
14080                                              makeArrayRef(Indices, NumElts));
14081     return Builder.CreateBitCast(Res, Ops[0]->getType());
14082   }
14083 
14084   case X86::BI__builtin_ia32_vplzcntd_128:
14085   case X86::BI__builtin_ia32_vplzcntd_256:
14086   case X86::BI__builtin_ia32_vplzcntd_512:
14087   case X86::BI__builtin_ia32_vplzcntq_128:
14088   case X86::BI__builtin_ia32_vplzcntq_256:
14089   case X86::BI__builtin_ia32_vplzcntq_512: {
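    // Lower to the generic ctlz intrinsic. The i1 'false' operand requests
    // fully defined behavior for a zero input (the result is the bit width).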
14090     Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
    return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
14092   }
14093   case X86::BI__builtin_ia32_sqrtss:
14094   case X86::BI__builtin_ia32_sqrtsd: {
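    // Scalar sqrt: take the sqrt of element 0 and reinsert it; the upper
    // elements pass through from Ops[0].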
14095     Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
14096     Function *F;
14097     if (Builder.getIsFPConstrained()) {
14098       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14099       F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
14100                            A->getType());
14101       A = Builder.CreateConstrainedFPCall(F, {A});
14102     } else {
14103       F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
14104       A = Builder.CreateCall(F, {A});
14105     }
14106     return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
14107   }
14108   case X86::BI__builtin_ia32_sqrtsh_round_mask:
14109   case X86::BI__builtin_ia32_sqrtsd_round_mask:
14110   case X86::BI__builtin_ia32_sqrtss_round_mask: {
14111     unsigned CC = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
    // Only lower to generic IR when the rounding mode is 4
    // (_MM_FROUND_CUR_DIRECTION); otherwise keep the target intrinsic.
14114     if (CC != 4) {
14115       Intrinsic::ID IID;
14116 
14117       switch (BuiltinID) {
14118       default:
14119         llvm_unreachable("Unsupported intrinsic!");
14120       case X86::BI__builtin_ia32_sqrtsh_round_mask:
14121         IID = Intrinsic::x86_avx512fp16_mask_sqrt_sh;
14122         break;
14123       case X86::BI__builtin_ia32_sqrtsd_round_mask:
14124         IID = Intrinsic::x86_avx512_mask_sqrt_sd;
14125         break;
14126       case X86::BI__builtin_ia32_sqrtss_round_mask:
14127         IID = Intrinsic::x86_avx512_mask_sqrt_ss;
14128         break;
14129       }
14130       return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14131     }
14132     Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
14133     Function *F;
14134     if (Builder.getIsFPConstrained()) {
14135       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14136       F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
14137                            A->getType());
14138       A = Builder.CreateConstrainedFPCall(F, A);
14139     } else {
14140       F = CGM.getIntrinsic(Intrinsic::sqrt, A->getType());
14141       A = Builder.CreateCall(F, A);
14142     }
14143     Value *Src = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
14144     A = EmitX86ScalarSelect(*this, Ops[3], A, Src);
14145     return Builder.CreateInsertElement(Ops[0], A, (uint64_t)0);
14146   }
14147   case X86::BI__builtin_ia32_sqrtpd256:
14148   case X86::BI__builtin_ia32_sqrtpd:
14149   case X86::BI__builtin_ia32_sqrtps256:
14150   case X86::BI__builtin_ia32_sqrtps:
14151   case X86::BI__builtin_ia32_sqrtph256:
14152   case X86::BI__builtin_ia32_sqrtph:
14153   case X86::BI__builtin_ia32_sqrtph512:
14154   case X86::BI__builtin_ia32_sqrtps512:
14155   case X86::BI__builtin_ia32_sqrtpd512: {
14156     if (Ops.size() == 2) {
14157       unsigned CC = cast<llvm::ConstantInt>(Ops[1])->getZExtValue();
      // Only lower to generic IR when the rounding mode is 4
      // (_MM_FROUND_CUR_DIRECTION); otherwise keep the target intrinsic.
14160       if (CC != 4) {
14161         Intrinsic::ID IID;
14162 
14163         switch (BuiltinID) {
14164         default:
14165           llvm_unreachable("Unsupported intrinsic!");
14166         case X86::BI__builtin_ia32_sqrtph512:
14167           IID = Intrinsic::x86_avx512fp16_sqrt_ph_512;
14168           break;
14169         case X86::BI__builtin_ia32_sqrtps512:
14170           IID = Intrinsic::x86_avx512_sqrt_ps_512;
14171           break;
14172         case X86::BI__builtin_ia32_sqrtpd512:
14173           IID = Intrinsic::x86_avx512_sqrt_pd_512;
14174           break;
14175         }
14176         return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14177       }
14178     }
14179     if (Builder.getIsFPConstrained()) {
14180       CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14181       Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
14182                                      Ops[0]->getType());
14183       return Builder.CreateConstrainedFPCall(F, Ops[0]);
14184     } else {
14185       Function *F = CGM.getIntrinsic(Intrinsic::sqrt, Ops[0]->getType());
14186       return Builder.CreateCall(F, Ops[0]);
14187     }
14188   }
14189   case X86::BI__builtin_ia32_pabsb128:
14190   case X86::BI__builtin_ia32_pabsw128:
14191   case X86::BI__builtin_ia32_pabsd128:
14192   case X86::BI__builtin_ia32_pabsb256:
14193   case X86::BI__builtin_ia32_pabsw256:
14194   case X86::BI__builtin_ia32_pabsd256:
14195   case X86::BI__builtin_ia32_pabsq128:
14196   case X86::BI__builtin_ia32_pabsq256:
14197   case X86::BI__builtin_ia32_pabsb512:
14198   case X86::BI__builtin_ia32_pabsw512:
14199   case X86::BI__builtin_ia32_pabsd512:
14200   case X86::BI__builtin_ia32_pabsq512: {
14201     Function *F = CGM.getIntrinsic(Intrinsic::abs, Ops[0]->getType());
14202     return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
14203   }
14204   case X86::BI__builtin_ia32_pmaxsb128:
14205   case X86::BI__builtin_ia32_pmaxsw128:
14206   case X86::BI__builtin_ia32_pmaxsd128:
14207   case X86::BI__builtin_ia32_pmaxsq128:
14208   case X86::BI__builtin_ia32_pmaxsb256:
14209   case X86::BI__builtin_ia32_pmaxsw256:
14210   case X86::BI__builtin_ia32_pmaxsd256:
14211   case X86::BI__builtin_ia32_pmaxsq256:
14212   case X86::BI__builtin_ia32_pmaxsb512:
14213   case X86::BI__builtin_ia32_pmaxsw512:
14214   case X86::BI__builtin_ia32_pmaxsd512:
14215   case X86::BI__builtin_ia32_pmaxsq512:
14216     return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smax);
14217   case X86::BI__builtin_ia32_pmaxub128:
14218   case X86::BI__builtin_ia32_pmaxuw128:
14219   case X86::BI__builtin_ia32_pmaxud128:
14220   case X86::BI__builtin_ia32_pmaxuq128:
14221   case X86::BI__builtin_ia32_pmaxub256:
14222   case X86::BI__builtin_ia32_pmaxuw256:
14223   case X86::BI__builtin_ia32_pmaxud256:
14224   case X86::BI__builtin_ia32_pmaxuq256:
14225   case X86::BI__builtin_ia32_pmaxub512:
14226   case X86::BI__builtin_ia32_pmaxuw512:
14227   case X86::BI__builtin_ia32_pmaxud512:
14228   case X86::BI__builtin_ia32_pmaxuq512:
14229     return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umax);
14230   case X86::BI__builtin_ia32_pminsb128:
14231   case X86::BI__builtin_ia32_pminsw128:
14232   case X86::BI__builtin_ia32_pminsd128:
14233   case X86::BI__builtin_ia32_pminsq128:
14234   case X86::BI__builtin_ia32_pminsb256:
14235   case X86::BI__builtin_ia32_pminsw256:
14236   case X86::BI__builtin_ia32_pminsd256:
14237   case X86::BI__builtin_ia32_pminsq256:
14238   case X86::BI__builtin_ia32_pminsb512:
14239   case X86::BI__builtin_ia32_pminsw512:
14240   case X86::BI__builtin_ia32_pminsd512:
14241   case X86::BI__builtin_ia32_pminsq512:
14242     return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::smin);
14243   case X86::BI__builtin_ia32_pminub128:
14244   case X86::BI__builtin_ia32_pminuw128:
14245   case X86::BI__builtin_ia32_pminud128:
14246   case X86::BI__builtin_ia32_pminuq128:
14247   case X86::BI__builtin_ia32_pminub256:
14248   case X86::BI__builtin_ia32_pminuw256:
14249   case X86::BI__builtin_ia32_pminud256:
14250   case X86::BI__builtin_ia32_pminuq256:
14251   case X86::BI__builtin_ia32_pminub512:
14252   case X86::BI__builtin_ia32_pminuw512:
14253   case X86::BI__builtin_ia32_pminud512:
14254   case X86::BI__builtin_ia32_pminuq512:
14255     return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::umin);
14256 
14257   case X86::BI__builtin_ia32_pmuludq128:
14258   case X86::BI__builtin_ia32_pmuludq256:
14259   case X86::BI__builtin_ia32_pmuludq512:
14260     return EmitX86Muldq(*this, /*IsSigned*/false, Ops);
14261 
14262   case X86::BI__builtin_ia32_pmuldq128:
14263   case X86::BI__builtin_ia32_pmuldq256:
14264   case X86::BI__builtin_ia32_pmuldq512:
14265     return EmitX86Muldq(*this, /*IsSigned*/true, Ops);
14266 
14267   case X86::BI__builtin_ia32_pternlogd512_mask:
14268   case X86::BI__builtin_ia32_pternlogq512_mask:
14269   case X86::BI__builtin_ia32_pternlogd128_mask:
14270   case X86::BI__builtin_ia32_pternlogd256_mask:
14271   case X86::BI__builtin_ia32_pternlogq128_mask:
14272   case X86::BI__builtin_ia32_pternlogq256_mask:
14273     return EmitX86Ternlog(*this, /*ZeroMask*/false, Ops);
14274 
14275   case X86::BI__builtin_ia32_pternlogd512_maskz:
14276   case X86::BI__builtin_ia32_pternlogq512_maskz:
14277   case X86::BI__builtin_ia32_pternlogd128_maskz:
14278   case X86::BI__builtin_ia32_pternlogd256_maskz:
14279   case X86::BI__builtin_ia32_pternlogq128_maskz:
14280   case X86::BI__builtin_ia32_pternlogq256_maskz:
14281     return EmitX86Ternlog(*this, /*ZeroMask*/true, Ops);
14282 
14283   case X86::BI__builtin_ia32_vpshldd128:
14284   case X86::BI__builtin_ia32_vpshldd256:
14285   case X86::BI__builtin_ia32_vpshldd512:
14286   case X86::BI__builtin_ia32_vpshldq128:
14287   case X86::BI__builtin_ia32_vpshldq256:
14288   case X86::BI__builtin_ia32_vpshldq512:
14289   case X86::BI__builtin_ia32_vpshldw128:
14290   case X86::BI__builtin_ia32_vpshldw256:
14291   case X86::BI__builtin_ia32_vpshldw512:
14292     return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
14293 
14294   case X86::BI__builtin_ia32_vpshrdd128:
14295   case X86::BI__builtin_ia32_vpshrdd256:
14296   case X86::BI__builtin_ia32_vpshrdd512:
14297   case X86::BI__builtin_ia32_vpshrdq128:
14298   case X86::BI__builtin_ia32_vpshrdq256:
14299   case X86::BI__builtin_ia32_vpshrdq512:
14300   case X86::BI__builtin_ia32_vpshrdw128:
14301   case X86::BI__builtin_ia32_vpshrdw256:
14302   case X86::BI__builtin_ia32_vpshrdw512:
14303     // Ops 0 and 1 are swapped.
14304     return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
14305 
14306   case X86::BI__builtin_ia32_vpshldvd128:
14307   case X86::BI__builtin_ia32_vpshldvd256:
14308   case X86::BI__builtin_ia32_vpshldvd512:
14309   case X86::BI__builtin_ia32_vpshldvq128:
14310   case X86::BI__builtin_ia32_vpshldvq256:
14311   case X86::BI__builtin_ia32_vpshldvq512:
14312   case X86::BI__builtin_ia32_vpshldvw128:
14313   case X86::BI__builtin_ia32_vpshldvw256:
14314   case X86::BI__builtin_ia32_vpshldvw512:
14315     return EmitX86FunnelShift(*this, Ops[0], Ops[1], Ops[2], false);
14316 
14317   case X86::BI__builtin_ia32_vpshrdvd128:
14318   case X86::BI__builtin_ia32_vpshrdvd256:
14319   case X86::BI__builtin_ia32_vpshrdvd512:
14320   case X86::BI__builtin_ia32_vpshrdvq128:
14321   case X86::BI__builtin_ia32_vpshrdvq256:
14322   case X86::BI__builtin_ia32_vpshrdvq512:
14323   case X86::BI__builtin_ia32_vpshrdvw128:
14324   case X86::BI__builtin_ia32_vpshrdvw256:
14325   case X86::BI__builtin_ia32_vpshrdvw512:
14326     // Ops 0 and 1 are swapped.
14327     return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
14328 
14329   // Reductions
14330   case X86::BI__builtin_ia32_reduce_add_d512:
14331   case X86::BI__builtin_ia32_reduce_add_q512: {
14332     Function *F =
14333         CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
14334     return Builder.CreateCall(F, {Ops[0]});
14335   }
14336   case X86::BI__builtin_ia32_reduce_and_d512:
14337   case X86::BI__builtin_ia32_reduce_and_q512: {
14338     Function *F =
14339         CGM.getIntrinsic(Intrinsic::vector_reduce_and, Ops[0]->getType());
14340     return Builder.CreateCall(F, {Ops[0]});
14341   }
14342   case X86::BI__builtin_ia32_reduce_fadd_pd512:
14343   case X86::BI__builtin_ia32_reduce_fadd_ps512:
14344   case X86::BI__builtin_ia32_reduce_fadd_ph512:
14345   case X86::BI__builtin_ia32_reduce_fadd_ph256:
14346   case X86::BI__builtin_ia32_reduce_fadd_ph128: {
    Function *F =
        CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
    // Scope the reassoc flag to this call so it does not leak into later IR.
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.getFastMathFlags().setAllowReassoc();
    return Builder.CreateCall(F, {Ops[0], Ops[1]});
14351   }
14352   case X86::BI__builtin_ia32_reduce_fmul_pd512:
14353   case X86::BI__builtin_ia32_reduce_fmul_ps512:
14354   case X86::BI__builtin_ia32_reduce_fmul_ph512:
14355   case X86::BI__builtin_ia32_reduce_fmul_ph256:
14356   case X86::BI__builtin_ia32_reduce_fmul_ph128: {
    Function *F =
        CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
    // Scope the reassoc flag to this call so it does not leak into later IR.
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.getFastMathFlags().setAllowReassoc();
    return Builder.CreateCall(F, {Ops[0], Ops[1]});
14361   }
14362   case X86::BI__builtin_ia32_reduce_fmax_pd512:
14363   case X86::BI__builtin_ia32_reduce_fmax_ps512:
14364   case X86::BI__builtin_ia32_reduce_fmax_ph512:
14365   case X86::BI__builtin_ia32_reduce_fmax_ph256:
14366   case X86::BI__builtin_ia32_reduce_fmax_ph128: {
    Function *F =
        CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType());
    // Scope the nnan flag to this call so it does not leak into later IR.
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.getFastMathFlags().setNoNaNs();
    return Builder.CreateCall(F, {Ops[0]});
14371   }
14372   case X86::BI__builtin_ia32_reduce_fmin_pd512:
14373   case X86::BI__builtin_ia32_reduce_fmin_ps512:
14374   case X86::BI__builtin_ia32_reduce_fmin_ph512:
14375   case X86::BI__builtin_ia32_reduce_fmin_ph256:
14376   case X86::BI__builtin_ia32_reduce_fmin_ph128: {
    Function *F =
        CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType());
    // Scope the nnan flag to this call so it does not leak into later IR.
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.getFastMathFlags().setNoNaNs();
    return Builder.CreateCall(F, {Ops[0]});
14381   }
14382   case X86::BI__builtin_ia32_reduce_mul_d512:
14383   case X86::BI__builtin_ia32_reduce_mul_q512: {
14384     Function *F =
14385         CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
14386     return Builder.CreateCall(F, {Ops[0]});
14387   }
14388   case X86::BI__builtin_ia32_reduce_or_d512:
14389   case X86::BI__builtin_ia32_reduce_or_q512: {
14390     Function *F =
14391         CGM.getIntrinsic(Intrinsic::vector_reduce_or, Ops[0]->getType());
14392     return Builder.CreateCall(F, {Ops[0]});
14393   }
14394   case X86::BI__builtin_ia32_reduce_smax_d512:
14395   case X86::BI__builtin_ia32_reduce_smax_q512: {
14396     Function *F =
14397         CGM.getIntrinsic(Intrinsic::vector_reduce_smax, Ops[0]->getType());
14398     return Builder.CreateCall(F, {Ops[0]});
14399   }
14400   case X86::BI__builtin_ia32_reduce_smin_d512:
14401   case X86::BI__builtin_ia32_reduce_smin_q512: {
14402     Function *F =
14403         CGM.getIntrinsic(Intrinsic::vector_reduce_smin, Ops[0]->getType());
14404     return Builder.CreateCall(F, {Ops[0]});
14405   }
14406   case X86::BI__builtin_ia32_reduce_umax_d512:
14407   case X86::BI__builtin_ia32_reduce_umax_q512: {
14408     Function *F =
14409         CGM.getIntrinsic(Intrinsic::vector_reduce_umax, Ops[0]->getType());
14410     return Builder.CreateCall(F, {Ops[0]});
14411   }
14412   case X86::BI__builtin_ia32_reduce_umin_d512:
14413   case X86::BI__builtin_ia32_reduce_umin_q512: {
14414     Function *F =
14415         CGM.getIntrinsic(Intrinsic::vector_reduce_umin, Ops[0]->getType());
14416     return Builder.CreateCall(F, {Ops[0]});
14417   }
14418 
14419   // 3DNow!
14420   case X86::BI__builtin_ia32_pswapdsf:
14421   case X86::BI__builtin_ia32_pswapdsi: {
14422     llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
14423     Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
14424     llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_3dnowa_pswapd);
14425     return Builder.CreateCall(F, Ops, "pswapd");
14426   }
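  // RDRAND/RDSEED: the intrinsic returns {value, i32 success flag} (the flag
  // reflects the carry flag set by the instruction). Store the value through
  // the out-pointer and return the flag.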
14427   case X86::BI__builtin_ia32_rdrand16_step:
14428   case X86::BI__builtin_ia32_rdrand32_step:
14429   case X86::BI__builtin_ia32_rdrand64_step:
14430   case X86::BI__builtin_ia32_rdseed16_step:
14431   case X86::BI__builtin_ia32_rdseed32_step:
14432   case X86::BI__builtin_ia32_rdseed64_step: {
14433     Intrinsic::ID ID;
14434     switch (BuiltinID) {
14435     default: llvm_unreachable("Unsupported intrinsic!");
14436     case X86::BI__builtin_ia32_rdrand16_step:
14437       ID = Intrinsic::x86_rdrand_16;
14438       break;
14439     case X86::BI__builtin_ia32_rdrand32_step:
14440       ID = Intrinsic::x86_rdrand_32;
14441       break;
14442     case X86::BI__builtin_ia32_rdrand64_step:
14443       ID = Intrinsic::x86_rdrand_64;
14444       break;
14445     case X86::BI__builtin_ia32_rdseed16_step:
14446       ID = Intrinsic::x86_rdseed_16;
14447       break;
14448     case X86::BI__builtin_ia32_rdseed32_step:
14449       ID = Intrinsic::x86_rdseed_32;
14450       break;
14451     case X86::BI__builtin_ia32_rdseed64_step:
14452       ID = Intrinsic::x86_rdseed_64;
14453       break;
14454     }
14455 
14456     Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
14457     Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 0),
14458                                       Ops[0]);
14459     return Builder.CreateExtractValue(Call, 1);
14460   }
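  // ADX add-with-carry / subtract-with-borrow: the intrinsic takes
  // (carry-in, a, b) and returns {i8 carry-out, result}. Store the result
  // through the last pointer operand and return the carry-out.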
14461   case X86::BI__builtin_ia32_addcarryx_u32:
14462   case X86::BI__builtin_ia32_addcarryx_u64:
14463   case X86::BI__builtin_ia32_subborrow_u32:
14464   case X86::BI__builtin_ia32_subborrow_u64: {
14465     Intrinsic::ID IID;
14466     switch (BuiltinID) {
14467     default: llvm_unreachable("Unsupported intrinsic!");
14468     case X86::BI__builtin_ia32_addcarryx_u32:
14469       IID = Intrinsic::x86_addcarry_32;
14470       break;
14471     case X86::BI__builtin_ia32_addcarryx_u64:
14472       IID = Intrinsic::x86_addcarry_64;
14473       break;
14474     case X86::BI__builtin_ia32_subborrow_u32:
14475       IID = Intrinsic::x86_subborrow_32;
14476       break;
14477     case X86::BI__builtin_ia32_subborrow_u64:
14478       IID = Intrinsic::x86_subborrow_64;
14479       break;
14480     }
14481 
14482     Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID),
14483                                      { Ops[0], Ops[1], Ops[2] });
14484     Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
14485                                       Ops[3]);
14486     return Builder.CreateExtractValue(Call, 0);
14487   }
14488 
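  // VFPCLASS tests each element against the set of FP classes selected by
  // the immediate and yields a compare-style mask, merged with the incoming
  // mask operand below.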
14489   case X86::BI__builtin_ia32_fpclassps128_mask:
14490   case X86::BI__builtin_ia32_fpclassps256_mask:
14491   case X86::BI__builtin_ia32_fpclassps512_mask:
14492   case X86::BI__builtin_ia32_fpclassph128_mask:
14493   case X86::BI__builtin_ia32_fpclassph256_mask:
14494   case X86::BI__builtin_ia32_fpclassph512_mask:
14495   case X86::BI__builtin_ia32_fpclasspd128_mask:
14496   case X86::BI__builtin_ia32_fpclasspd256_mask:
14497   case X86::BI__builtin_ia32_fpclasspd512_mask: {
14498     unsigned NumElts =
14499         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14500     Value *MaskIn = Ops[2];
14501     Ops.erase(&Ops[2]);
14502 
14503     Intrinsic::ID ID;
14504     switch (BuiltinID) {
14505     default: llvm_unreachable("Unsupported intrinsic!");
14506     case X86::BI__builtin_ia32_fpclassph128_mask:
14507       ID = Intrinsic::x86_avx512fp16_fpclass_ph_128;
14508       break;
14509     case X86::BI__builtin_ia32_fpclassph256_mask:
14510       ID = Intrinsic::x86_avx512fp16_fpclass_ph_256;
14511       break;
14512     case X86::BI__builtin_ia32_fpclassph512_mask:
14513       ID = Intrinsic::x86_avx512fp16_fpclass_ph_512;
14514       break;
14515     case X86::BI__builtin_ia32_fpclassps128_mask:
14516       ID = Intrinsic::x86_avx512_fpclass_ps_128;
14517       break;
14518     case X86::BI__builtin_ia32_fpclassps256_mask:
14519       ID = Intrinsic::x86_avx512_fpclass_ps_256;
14520       break;
14521     case X86::BI__builtin_ia32_fpclassps512_mask:
14522       ID = Intrinsic::x86_avx512_fpclass_ps_512;
14523       break;
14524     case X86::BI__builtin_ia32_fpclasspd128_mask:
14525       ID = Intrinsic::x86_avx512_fpclass_pd_128;
14526       break;
14527     case X86::BI__builtin_ia32_fpclasspd256_mask:
14528       ID = Intrinsic::x86_avx512_fpclass_pd_256;
14529       break;
14530     case X86::BI__builtin_ia32_fpclasspd512_mask:
14531       ID = Intrinsic::x86_avx512_fpclass_pd_512;
14532       break;
14533     }
14534 
14535     Value *Fpclass = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14536     return EmitX86MaskedCompareResult(*this, Fpclass, NumElts, MaskIn);
14537   }
14538 
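  // VP2INTERSECT returns a pair of masks: for each source vector, the lanes
  // whose value also appears somewhere in the other source. Both masks are
  // stored through the two pointer operands; the builtin itself returns
  // nothing.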
14539   case X86::BI__builtin_ia32_vp2intersect_q_512:
14540   case X86::BI__builtin_ia32_vp2intersect_q_256:
14541   case X86::BI__builtin_ia32_vp2intersect_q_128:
14542   case X86::BI__builtin_ia32_vp2intersect_d_512:
14543   case X86::BI__builtin_ia32_vp2intersect_d_256:
14544   case X86::BI__builtin_ia32_vp2intersect_d_128: {
14545     unsigned NumElts =
14546         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14547     Intrinsic::ID ID;
14548 
14549     switch (BuiltinID) {
14550     default: llvm_unreachable("Unsupported intrinsic!");
14551     case X86::BI__builtin_ia32_vp2intersect_q_512:
14552       ID = Intrinsic::x86_avx512_vp2intersect_q_512;
14553       break;
14554     case X86::BI__builtin_ia32_vp2intersect_q_256:
14555       ID = Intrinsic::x86_avx512_vp2intersect_q_256;
14556       break;
14557     case X86::BI__builtin_ia32_vp2intersect_q_128:
14558       ID = Intrinsic::x86_avx512_vp2intersect_q_128;
14559       break;
14560     case X86::BI__builtin_ia32_vp2intersect_d_512:
14561       ID = Intrinsic::x86_avx512_vp2intersect_d_512;
14562       break;
14563     case X86::BI__builtin_ia32_vp2intersect_d_256:
14564       ID = Intrinsic::x86_avx512_vp2intersect_d_256;
14565       break;
14566     case X86::BI__builtin_ia32_vp2intersect_d_128:
14567       ID = Intrinsic::x86_avx512_vp2intersect_d_128;
14568       break;
14569     }
14570 
14571     Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID), {Ops[0], Ops[1]});
14572     Value *Result = Builder.CreateExtractValue(Call, 0);
14573     Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
14574     Builder.CreateDefaultAlignedStore(Result, Ops[2]);
14575 
14576     Result = Builder.CreateExtractValue(Call, 1);
14577     Result = EmitX86MaskedCompareResult(*this, Result, NumElts, nullptr);
14578     return Builder.CreateDefaultAlignedStore(Result, Ops[3]);
14579   }
14580 
14581   case X86::BI__builtin_ia32_vpmultishiftqb128:
14582   case X86::BI__builtin_ia32_vpmultishiftqb256:
14583   case X86::BI__builtin_ia32_vpmultishiftqb512: {
14584     Intrinsic::ID ID;
14585     switch (BuiltinID) {
14586     default: llvm_unreachable("Unsupported intrinsic!");
14587     case X86::BI__builtin_ia32_vpmultishiftqb128:
14588       ID = Intrinsic::x86_avx512_pmultishift_qb_128;
14589       break;
14590     case X86::BI__builtin_ia32_vpmultishiftqb256:
14591       ID = Intrinsic::x86_avx512_pmultishift_qb_256;
14592       break;
14593     case X86::BI__builtin_ia32_vpmultishiftqb512:
14594       ID = Intrinsic::x86_avx512_pmultishift_qb_512;
14595       break;
14596     }
14597 
14598     return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14599   }
14600 
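  // VPSHUFBITQMB produces one mask bit per byte: each control byte in the
  // second source selects, by index, a bit from the corresponding quadword
  // of the first source.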
14601   case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
14602   case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
14603   case X86::BI__builtin_ia32_vpshufbitqmb512_mask: {
14604     unsigned NumElts =
14605         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14606     Value *MaskIn = Ops[2];
14607     Ops.erase(&Ops[2]);
14608 
14609     Intrinsic::ID ID;
14610     switch (BuiltinID) {
14611     default: llvm_unreachable("Unsupported intrinsic!");
14612     case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
14613       ID = Intrinsic::x86_avx512_vpshufbitqmb_128;
14614       break;
14615     case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
14616       ID = Intrinsic::x86_avx512_vpshufbitqmb_256;
14617       break;
14618     case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
14619       ID = Intrinsic::x86_avx512_vpshufbitqmb_512;
14620       break;
14621     }
14622 
14623     Value *Shufbit = Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
14624     return EmitX86MaskedCompareResult(*this, Shufbit, NumElts, MaskIn);
14625   }
14626 
  // Packed comparison intrinsics
14628   case X86::BI__builtin_ia32_cmpeqps:
14629   case X86::BI__builtin_ia32_cmpeqpd:
14630     return getVectorFCmpIR(CmpInst::FCMP_OEQ, /*IsSignaling*/false);
14631   case X86::BI__builtin_ia32_cmpltps:
14632   case X86::BI__builtin_ia32_cmpltpd:
14633     return getVectorFCmpIR(CmpInst::FCMP_OLT, /*IsSignaling*/true);
14634   case X86::BI__builtin_ia32_cmpleps:
14635   case X86::BI__builtin_ia32_cmplepd:
14636     return getVectorFCmpIR(CmpInst::FCMP_OLE, /*IsSignaling*/true);
14637   case X86::BI__builtin_ia32_cmpunordps:
14638   case X86::BI__builtin_ia32_cmpunordpd:
14639     return getVectorFCmpIR(CmpInst::FCMP_UNO, /*IsSignaling*/false);
14640   case X86::BI__builtin_ia32_cmpneqps:
14641   case X86::BI__builtin_ia32_cmpneqpd:
14642     return getVectorFCmpIR(CmpInst::FCMP_UNE, /*IsSignaling*/false);
14643   case X86::BI__builtin_ia32_cmpnltps:
14644   case X86::BI__builtin_ia32_cmpnltpd:
14645     return getVectorFCmpIR(CmpInst::FCMP_UGE, /*IsSignaling*/true);
14646   case X86::BI__builtin_ia32_cmpnleps:
14647   case X86::BI__builtin_ia32_cmpnlepd:
14648     return getVectorFCmpIR(CmpInst::FCMP_UGT, /*IsSignaling*/true);
14649   case X86::BI__builtin_ia32_cmpordps:
14650   case X86::BI__builtin_ia32_cmpordpd:
14651     return getVectorFCmpIR(CmpInst::FCMP_ORD, /*IsSignaling*/false);
14652   case X86::BI__builtin_ia32_cmpph128_mask:
14653   case X86::BI__builtin_ia32_cmpph256_mask:
14654   case X86::BI__builtin_ia32_cmpph512_mask:
14655   case X86::BI__builtin_ia32_cmpps128_mask:
14656   case X86::BI__builtin_ia32_cmpps256_mask:
14657   case X86::BI__builtin_ia32_cmpps512_mask:
14658   case X86::BI__builtin_ia32_cmppd128_mask:
14659   case X86::BI__builtin_ia32_cmppd256_mask:
14660   case X86::BI__builtin_ia32_cmppd512_mask:
14661     IsMaskFCmp = true;
14662     LLVM_FALLTHROUGH;
14663   case X86::BI__builtin_ia32_cmpps:
14664   case X86::BI__builtin_ia32_cmpps256:
14665   case X86::BI__builtin_ia32_cmppd:
14666   case X86::BI__builtin_ia32_cmppd256: {
    // Lower vector comparisons to fcmp instructions, ignoring the requested
    // signaling behavior and rounding mode. This is only valid when the
    // fp-model is not strict and FENV_ACCESS is off.

    // The third argument is the comparison condition, an integer in the
    // range [0, 31].
14674     unsigned CC = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0x1f;
14675 
    // Map the condition code to an fcmp predicate. The OS/OQ (signaling/
    // quiet) distinction does not change the predicate - e.g. both _CMP_GT_OS
    // and _CMP_GT_OQ map to FCMP_OGT - but is tracked in IsSignaling and
    // later selects between the quiet and signaling forms of fcmp.
14679     FCmpInst::Predicate Pred;
14680     bool IsSignaling;
    // Predicates for 16-31 repeat the 0-15 predicates. Only the signaling
    // behavior is inverted. We'll handle that after the switch.
14683     switch (CC & 0xf) {
14684     case 0x00: Pred = FCmpInst::FCMP_OEQ;   IsSignaling = false; break;
14685     case 0x01: Pred = FCmpInst::FCMP_OLT;   IsSignaling = true;  break;
14686     case 0x02: Pred = FCmpInst::FCMP_OLE;   IsSignaling = true;  break;
14687     case 0x03: Pred = FCmpInst::FCMP_UNO;   IsSignaling = false; break;
14688     case 0x04: Pred = FCmpInst::FCMP_UNE;   IsSignaling = false; break;
14689     case 0x05: Pred = FCmpInst::FCMP_UGE;   IsSignaling = true;  break;
14690     case 0x06: Pred = FCmpInst::FCMP_UGT;   IsSignaling = true;  break;
14691     case 0x07: Pred = FCmpInst::FCMP_ORD;   IsSignaling = false; break;
14692     case 0x08: Pred = FCmpInst::FCMP_UEQ;   IsSignaling = false; break;
14693     case 0x09: Pred = FCmpInst::FCMP_ULT;   IsSignaling = true;  break;
14694     case 0x0a: Pred = FCmpInst::FCMP_ULE;   IsSignaling = true;  break;
14695     case 0x0b: Pred = FCmpInst::FCMP_FALSE; IsSignaling = false; break;
14696     case 0x0c: Pred = FCmpInst::FCMP_ONE;   IsSignaling = false; break;
14697     case 0x0d: Pred = FCmpInst::FCMP_OGE;   IsSignaling = true;  break;
14698     case 0x0e: Pred = FCmpInst::FCMP_OGT;   IsSignaling = true;  break;
14699     case 0x0f: Pred = FCmpInst::FCMP_TRUE;  IsSignaling = false; break;
14700     default: llvm_unreachable("Unhandled CC");
14701     }
14702 
    // Invert the signaling behavior for 16-31.
14704     if (CC & 0x10)
14705       IsSignaling = !IsSignaling;
14706 
14707     // If the predicate is true or false and we're using constrained intrinsics,
14708     // we don't have a compare intrinsic we can use. Just use the legacy X86
14709     // specific intrinsic.
14710     // If the intrinsic is mask enabled and we're using constrained intrinsics,
14711     // use the legacy X86 specific intrinsic.
14712     if (Builder.getIsFPConstrained() &&
14713         (Pred == FCmpInst::FCMP_TRUE || Pred == FCmpInst::FCMP_FALSE ||
14714          IsMaskFCmp)) {
14715 
14716       Intrinsic::ID IID;
14717       switch (BuiltinID) {
14718       default: llvm_unreachable("Unexpected builtin");
14719       case X86::BI__builtin_ia32_cmpps:
14720         IID = Intrinsic::x86_sse_cmp_ps;
14721         break;
14722       case X86::BI__builtin_ia32_cmpps256:
14723         IID = Intrinsic::x86_avx_cmp_ps_256;
14724         break;
14725       case X86::BI__builtin_ia32_cmppd:
14726         IID = Intrinsic::x86_sse2_cmp_pd;
14727         break;
14728       case X86::BI__builtin_ia32_cmppd256:
14729         IID = Intrinsic::x86_avx_cmp_pd_256;
14730         break;
14731       case X86::BI__builtin_ia32_cmpps512_mask:
14732         IID = Intrinsic::x86_avx512_mask_cmp_ps_512;
14733         break;
14734       case X86::BI__builtin_ia32_cmppd512_mask:
14735         IID = Intrinsic::x86_avx512_mask_cmp_pd_512;
14736         break;
14737       case X86::BI__builtin_ia32_cmpps128_mask:
14738         IID = Intrinsic::x86_avx512_mask_cmp_ps_128;
14739         break;
14740       case X86::BI__builtin_ia32_cmpps256_mask:
14741         IID = Intrinsic::x86_avx512_mask_cmp_ps_256;
14742         break;
14743       case X86::BI__builtin_ia32_cmppd128_mask:
14744         IID = Intrinsic::x86_avx512_mask_cmp_pd_128;
14745         break;
14746       case X86::BI__builtin_ia32_cmppd256_mask:
14747         IID = Intrinsic::x86_avx512_mask_cmp_pd_256;
14748         break;
14749       }
14750 
14751       Function *Intr = CGM.getIntrinsic(IID);
14752       if (IsMaskFCmp) {
14753         unsigned NumElts =
14754             cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14755         Ops[3] = getMaskVecValue(*this, Ops[3], NumElts);
14756         Value *Cmp = Builder.CreateCall(Intr, Ops);
14757         return EmitX86MaskedCompareResult(*this, Cmp, NumElts, nullptr);
14758       }
14759 
14760       return Builder.CreateCall(Intr, Ops);
14761     }
14762 
    // The _mask builtins return a mask; builtins without the _mask suffix
    // return a vector of integers of the same width as the input vectors.
14765     if (IsMaskFCmp) {
14766       // We ignore SAE if strict FP is disabled. We only keep precise
14767       // exception behavior under strict FP.
14768       // NOTE: If strict FP does ever go through here a CGFPOptionsRAII
14769       // object will be required.
14770       unsigned NumElts =
14771           cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
14772       Value *Cmp;
14773       if (IsSignaling)
14774         Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
14775       else
14776         Cmp = Builder.CreateFCmp(Pred, Ops[0], Ops[1]);
14777       return EmitX86MaskedCompareResult(*this, Cmp, NumElts, Ops[3]);
14778     }
14779 
14780     return getVectorFCmpIR(Pred, IsSignaling);
14781   }
14782 
14783   // SSE scalar comparison intrinsics
14784   case X86::BI__builtin_ia32_cmpeqss:
14785     return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 0);
14786   case X86::BI__builtin_ia32_cmpltss:
14787     return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 1);
14788   case X86::BI__builtin_ia32_cmpless:
14789     return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 2);
14790   case X86::BI__builtin_ia32_cmpunordss:
14791     return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 3);
14792   case X86::BI__builtin_ia32_cmpneqss:
14793     return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 4);
14794   case X86::BI__builtin_ia32_cmpnltss:
14795     return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 5);
14796   case X86::BI__builtin_ia32_cmpnless:
14797     return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 6);
14798   case X86::BI__builtin_ia32_cmpordss:
14799     return getCmpIntrinsicCall(Intrinsic::x86_sse_cmp_ss, 7);
14800   case X86::BI__builtin_ia32_cmpeqsd:
14801     return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 0);
14802   case X86::BI__builtin_ia32_cmpltsd:
14803     return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 1);
14804   case X86::BI__builtin_ia32_cmplesd:
14805     return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 2);
14806   case X86::BI__builtin_ia32_cmpunordsd:
14807     return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 3);
14808   case X86::BI__builtin_ia32_cmpneqsd:
14809     return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 4);
14810   case X86::BI__builtin_ia32_cmpnltsd:
14811     return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 5);
14812   case X86::BI__builtin_ia32_cmpnlesd:
14813     return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
14814   case X86::BI__builtin_ia32_cmpordsd:
14815     return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
14816 
14817   // f16c half2float intrinsics
14818   case X86::BI__builtin_ia32_vcvtph2ps:
14819   case X86::BI__builtin_ia32_vcvtph2ps256:
14820   case X86::BI__builtin_ia32_vcvtph2ps_mask:
14821   case X86::BI__builtin_ia32_vcvtph2ps256_mask:
14822   case X86::BI__builtin_ia32_vcvtph2ps512_mask: {
14823     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
14824     return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
14825   }
14826 
  // AVX512 bf16 intrinsics
14828   case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
14829     Ops[2] = getMaskVecValue(
14830         *this, Ops[2],
14831         cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements());
14832     Intrinsic::ID IID = Intrinsic::x86_avx512bf16_mask_cvtneps2bf16_128;
14833     return Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
14834   }
14835   case X86::BI__builtin_ia32_cvtsbf162ss_32:
14836     return EmitX86CvtBF16ToFloatExpr(*this, E, Ops);
14837 
14838   case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14839   case X86::BI__builtin_ia32_cvtneps2bf16_512_mask: {
14840     Intrinsic::ID IID;
14841     switch (BuiltinID) {
14842     default: llvm_unreachable("Unsupported intrinsic!");
14843     case X86::BI__builtin_ia32_cvtneps2bf16_256_mask:
14844       IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_256;
14845       break;
14846     case X86::BI__builtin_ia32_cvtneps2bf16_512_mask:
14847       IID = Intrinsic::x86_avx512bf16_cvtneps2bf16_512;
14848       break;
14849     }
14850     Value *Res = Builder.CreateCall(CGM.getIntrinsic(IID), Ops[0]);
14851     return EmitX86Select(*this, Ops[2], Res, Ops[1]);
14852   }
14853 
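  // MSVC __emul/__emulu: widening 32 x 32 -> 64-bit multiplies, signed and
  // unsigned respectively.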
14854   case X86::BI__emul:
14855   case X86::BI__emulu: {
14856     llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
14857     bool isSigned = (BuiltinID == X86::BI__emul);
14858     Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
14859     Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
14860     return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
14861   }
14862   case X86::BI__mulh:
14863   case X86::BI__umulh:
14864   case X86::BI_mul128:
14865   case X86::BI_umul128: {
14866     llvm::Type *ResType = ConvertType(E->getType());
14867     llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
14868 
14869     bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
14870     Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
14871     Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
14872 
14873     Value *MulResult, *HigherBits;
14874     if (IsSigned) {
14875       MulResult = Builder.CreateNSWMul(LHS, RHS);
14876       HigherBits = Builder.CreateAShr(MulResult, 64);
14877     } else {
14878       MulResult = Builder.CreateNUWMul(LHS, RHS);
14879       HigherBits = Builder.CreateLShr(MulResult, 64);
14880     }
14881     HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
14882 
14883     if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
14884       return HigherBits;
14885 
14886     Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
14887     Builder.CreateStore(HigherBits, HighBitsAddress);
14888     return Builder.CreateIntCast(MulResult, ResType, IsSigned);
14889   }
14890 
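  // MSVC __faststorefence: a full sequentially consistent, system-wide
  // memory barrier.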
14891   case X86::BI__faststorefence: {
14892     return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14893                                llvm::SyncScope::System);
14894   }
14895   case X86::BI__shiftleft128:
14896   case X86::BI__shiftright128: {
14897     llvm::Function *F = CGM.getIntrinsic(
14898         BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
14899         Int64Ty);
14900     // Flip low/high ops and zero-extend amount to matching type.
14901     // shiftleft128(Low, High, Amt) -> fshl(High, Low, Amt)
14902     // shiftright128(Low, High, Amt) -> fshr(High, Low, Amt)
14903     std::swap(Ops[0], Ops[1]);
14904     Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
14905     return Builder.CreateCall(F, Ops);
14906   }
14907   case X86::BI_ReadWriteBarrier:
14908   case X86::BI_ReadBarrier:
14909   case X86::BI_WriteBarrier: {
14910     return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
14911                                llvm::SyncScope::SingleThread);
14912   }
14913 
14914   case X86::BI_AddressOfReturnAddress: {
14915     Function *F =
14916         CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
14917     return Builder.CreateCall(F);
14918   }
14919   case X86::BI__stosb: {
    // We treat __stosb as a volatile memset - it may not generate a
    // "rep stosb" instruction, but it will create a memset that won't be
    // optimized away.
14922     return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], Align(1), true);
14923   }
14924   case X86::BI__ud2:
    // llvm.trap lowers to a ud2a instruction on x86.
14926     return EmitTrapCall(Intrinsic::trap);
14927   case X86::BI__int2c: {
14928     // This syscall signals a driver assertion failure in x86 NT kernels.
14929     llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
14930     llvm::InlineAsm *IA =
14931         llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*hasSideEffects=*/true);
14932     llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
14933         getLLVMContext(), llvm::AttributeList::FunctionIndex,
14934         llvm::Attribute::NoReturn);
14935     llvm::CallInst *CI = Builder.CreateCall(IA);
14936     CI->setAttributes(NoReturnAttr);
14937     return CI;
14938   }
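  // MSVC segment-relative reads: in the X86 backend, address space 257 maps
  // to the FS segment and 256 to the GS segment. The loads are volatile so
  // they cannot be folded or reordered away.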
14939   case X86::BI__readfsbyte:
14940   case X86::BI__readfsword:
14941   case X86::BI__readfsdword:
14942   case X86::BI__readfsqword: {
14943     llvm::Type *IntTy = ConvertType(E->getType());
14944     Value *Ptr =
14945         Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 257));
14946     LoadInst *Load = Builder.CreateAlignedLoad(
14947         IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14948     Load->setVolatile(true);
14949     return Load;
14950   }
14951   case X86::BI__readgsbyte:
14952   case X86::BI__readgsword:
14953   case X86::BI__readgsdword:
14954   case X86::BI__readgsqword: {
14955     llvm::Type *IntTy = ConvertType(E->getType());
14956     Value *Ptr =
14957         Builder.CreateIntToPtr(Ops[0], llvm::PointerType::get(IntTy, 256));
14958     LoadInst *Load = Builder.CreateAlignedLoad(
14959         IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
14960     Load->setVolatile(true);
14961     return Load;
14962   }
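  // Saturating vector add/sub builtins lower to the generic
  // llvm.{s,u}add.sat / llvm.{s,u}sub.sat intrinsics.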
14963   case X86::BI__builtin_ia32_paddsb512:
14964   case X86::BI__builtin_ia32_paddsw512:
14965   case X86::BI__builtin_ia32_paddsb256:
14966   case X86::BI__builtin_ia32_paddsw256:
14967   case X86::BI__builtin_ia32_paddsb128:
14968   case X86::BI__builtin_ia32_paddsw128:
14969     return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
14970   case X86::BI__builtin_ia32_paddusb512:
14971   case X86::BI__builtin_ia32_paddusw512:
14972   case X86::BI__builtin_ia32_paddusb256:
14973   case X86::BI__builtin_ia32_paddusw256:
14974   case X86::BI__builtin_ia32_paddusb128:
14975   case X86::BI__builtin_ia32_paddusw128:
14976     return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
14977   case X86::BI__builtin_ia32_psubsb512:
14978   case X86::BI__builtin_ia32_psubsw512:
14979   case X86::BI__builtin_ia32_psubsb256:
14980   case X86::BI__builtin_ia32_psubsw256:
14981   case X86::BI__builtin_ia32_psubsb128:
14982   case X86::BI__builtin_ia32_psubsw128:
14983     return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
14984   case X86::BI__builtin_ia32_psubusb512:
14985   case X86::BI__builtin_ia32_psubusw512:
14986   case X86::BI__builtin_ia32_psubusb256:
14987   case X86::BI__builtin_ia32_psubusw256:
14988   case X86::BI__builtin_ia32_psubusb128:
14989   case X86::BI__builtin_ia32_psubusw128:
14990     return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
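  // Key Locker ENCODEKEY128/256: the intrinsic returns the control dword
  // followed by the key handle in 128-bit pieces; the pieces are stored 16
  // bytes apart through the output pointer.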
14991   case X86::BI__builtin_ia32_encodekey128_u32: {
14992     Intrinsic::ID IID = Intrinsic::x86_encodekey128;
14993 
14994     Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1]});
14995 
14996     for (int i = 0; i < 3; ++i) {
14997       Value *Extract = Builder.CreateExtractValue(Call, i + 1);
14998       Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16);
14999       Ptr = Builder.CreateBitCast(
15000           Ptr, llvm::PointerType::getUnqual(Extract->getType()));
15001       Builder.CreateAlignedStore(Extract, Ptr, Align(1));
15002     }
15003 
15004     return Builder.CreateExtractValue(Call, 0);
15005   }
15006   case X86::BI__builtin_ia32_encodekey256_u32: {
15007     Intrinsic::ID IID = Intrinsic::x86_encodekey256;
15008 
15009     Value *Call =
15010         Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[0], Ops[1], Ops[2]});
15011 
15012     for (int i = 0; i < 4; ++i) {
15013       Value *Extract = Builder.CreateExtractValue(Call, i + 1);
15014       Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16);
15015       Ptr = Builder.CreateBitCast(
15016           Ptr, llvm::PointerType::getUnqual(Extract->getType()));
15017       Builder.CreateAlignedStore(Extract, Ptr, Align(1));
15018     }
15019 
15020     return Builder.CreateExtractValue(Call, 0);
15021   }
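  // Key Locker AES encrypt/decrypt: the intrinsic returns a success flag
  // (derived from ZF) plus the processed data. Branch on the flag, storing
  // the result on success and zero on failure, then return the flag.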
15022   case X86::BI__builtin_ia32_aesenc128kl_u8:
15023   case X86::BI__builtin_ia32_aesdec128kl_u8:
15024   case X86::BI__builtin_ia32_aesenc256kl_u8:
15025   case X86::BI__builtin_ia32_aesdec256kl_u8: {
15026     Intrinsic::ID IID;
15027     StringRef BlockName;
15028     switch (BuiltinID) {
15029     default:
15030       llvm_unreachable("Unexpected builtin");
15031     case X86::BI__builtin_ia32_aesenc128kl_u8:
15032       IID = Intrinsic::x86_aesenc128kl;
15033       BlockName = "aesenc128kl";
15034       break;
15035     case X86::BI__builtin_ia32_aesdec128kl_u8:
15036       IID = Intrinsic::x86_aesdec128kl;
15037       BlockName = "aesdec128kl";
15038       break;
15039     case X86::BI__builtin_ia32_aesenc256kl_u8:
15040       IID = Intrinsic::x86_aesenc256kl;
15041       BlockName = "aesenc256kl";
15042       break;
15043     case X86::BI__builtin_ia32_aesdec256kl_u8:
15044       IID = Intrinsic::x86_aesdec256kl;
15045       BlockName = "aesdec256kl";
15046       break;
15047     }
15048 
15049     Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
15050 
15051     BasicBlock *NoError =
15052         createBasicBlock(BlockName + "_no_error", this->CurFn);
15053     BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
15054     BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
15055 
15056     Value *Ret = Builder.CreateExtractValue(Call, 0);
15057     Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
15058     Value *Out = Builder.CreateExtractValue(Call, 1);
15059     Builder.CreateCondBr(Succ, NoError, Error);
15060 
15061     Builder.SetInsertPoint(NoError);
15062     Builder.CreateDefaultAlignedStore(Out, Ops[0]);
15063     Builder.CreateBr(End);
15064 
15065     Builder.SetInsertPoint(Error);
15066     Constant *Zero = llvm::Constant::getNullValue(Out->getType());
15067     Builder.CreateDefaultAlignedStore(Zero, Ops[0]);
15068     Builder.CreateBr(End);
15069 
15070     Builder.SetInsertPoint(End);
15071     return Builder.CreateExtractValue(Call, 0);
15072   }
15073   case X86::BI__builtin_ia32_aesencwide128kl_u8:
15074   case X86::BI__builtin_ia32_aesdecwide128kl_u8:
15075   case X86::BI__builtin_ia32_aesencwide256kl_u8:
15076   case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
15077     Intrinsic::ID IID;
15078     StringRef BlockName;
    switch (BuiltinID) {
    default:
      llvm_unreachable("Unexpected builtin");
15080     case X86::BI__builtin_ia32_aesencwide128kl_u8:
15081       IID = Intrinsic::x86_aesencwide128kl;
15082       BlockName = "aesencwide128kl";
15083       break;
15084     case X86::BI__builtin_ia32_aesdecwide128kl_u8:
15085       IID = Intrinsic::x86_aesdecwide128kl;
15086       BlockName = "aesdecwide128kl";
15087       break;
15088     case X86::BI__builtin_ia32_aesencwide256kl_u8:
15089       IID = Intrinsic::x86_aesencwide256kl;
15090       BlockName = "aesencwide256kl";
15091       break;
15092     case X86::BI__builtin_ia32_aesdecwide256kl_u8:
15093       IID = Intrinsic::x86_aesdecwide256kl;
15094       BlockName = "aesdecwide256kl";
15095       break;
15096     }
15097 
15098     llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2);
15099     Value *InOps[9];
15100     InOps[0] = Ops[2];
15101     for (int i = 0; i != 8; ++i) {
15102       Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i);
15103       InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16));
15104     }
15105 
15106     Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
15107 
15108     BasicBlock *NoError =
15109         createBasicBlock(BlockName + "_no_error", this->CurFn);
15110     BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
15111     BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
15112 
15113     Value *Ret = Builder.CreateExtractValue(Call, 0);
15114     Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
15115     Builder.CreateCondBr(Succ, NoError, Error);
15116 
15117     Builder.SetInsertPoint(NoError);
15118     for (int i = 0; i != 8; ++i) {
15119       Value *Extract = Builder.CreateExtractValue(Call, i + 1);
15120       Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i);
15121       Builder.CreateAlignedStore(Extract, Ptr, Align(16));
15122     }
15123     Builder.CreateBr(End);
15124 
15125     Builder.SetInsertPoint(Error);
15126     for (int i = 0; i != 8; ++i) {
15127       Value *Out = Builder.CreateExtractValue(Call, i + 1);
15128       Constant *Zero = llvm::Constant::getNullValue(Out->getType());
15129       Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i);
15130       Builder.CreateAlignedStore(Zero, Ptr, Align(16));
15131     }
15132     Builder.CreateBr(End);
15133 
15134     Builder.SetInsertPoint(End);
15135     return Builder.CreateExtractValue(Call, 0);
15136   }
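  // AVX512-FP16 complex multiply-accumulate: the vfcmadd forms use the
  // complex conjugate of one source operand, the vfmadd forms do not; the
  // result is then merged under the mask operand.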
15137   case X86::BI__builtin_ia32_vfcmaddcph512_mask:
15138     IsConjFMA = true;
15139     LLVM_FALLTHROUGH;
15140   case X86::BI__builtin_ia32_vfmaddcph512_mask: {
15141     Intrinsic::ID IID = IsConjFMA
15142                             ? Intrinsic::x86_avx512fp16_mask_vfcmadd_cph_512
15143                             : Intrinsic::x86_avx512fp16_mask_vfmadd_cph_512;
15144     Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15145     return EmitX86Select(*this, Ops[3], Call, Ops[0]);
15146   }
15147   case X86::BI__builtin_ia32_vfcmaddcsh_round_mask:
15148     IsConjFMA = true;
15149     LLVM_FALLTHROUGH;
15150   case X86::BI__builtin_ia32_vfmaddcsh_round_mask: {
15151     Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
15152                                   : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
15153     Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15154     Value *And = Builder.CreateAnd(Ops[3], llvm::ConstantInt::get(Int8Ty, 1));
15155     return EmitX86Select(*this, And, Call, Ops[0]);
15156   }
15157   case X86::BI__builtin_ia32_vfcmaddcsh_round_mask3:
15158     IsConjFMA = true;
15159     LLVM_FALLTHROUGH;
15160   case X86::BI__builtin_ia32_vfmaddcsh_round_mask3: {
15161     Intrinsic::ID IID = IsConjFMA ? Intrinsic::x86_avx512fp16_mask_vfcmadd_csh
15162                                   : Intrinsic::x86_avx512fp16_mask_vfmadd_csh;
15163     Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), Ops);
15164     static constexpr int Mask[] = {0, 5, 6, 7};
15165     return Builder.CreateShuffleVector(Call, Ops[2], Mask);
15166   }
15167   }
15168 }
15169 
15170 Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
15171                                            const CallExpr *E) {
15172   SmallVector<Value*, 4> Ops;
15173 
15174   for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
15175     Ops.push_back(EmitScalarExpr(E->getArg(i)));
15176 
15177   Intrinsic::ID ID = Intrinsic::not_intrinsic;
15178 
15179   switch (BuiltinID) {
15180   default: return nullptr;
15181 
15182   // __builtin_ppc_get_timebase is GCC 4.8+'s PowerPC-specific name for what we
15183   // call __builtin_readcyclecounter.
15184   case PPC::BI__builtin_ppc_get_timebase:
15185     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
15186 
15187   // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
15188   case PPC::BI__builtin_altivec_lvx:
15189   case PPC::BI__builtin_altivec_lvxl:
15190   case PPC::BI__builtin_altivec_lvebx:
15191   case PPC::BI__builtin_altivec_lvehx:
15192   case PPC::BI__builtin_altivec_lvewx:
15193   case PPC::BI__builtin_altivec_lvsl:
15194   case PPC::BI__builtin_altivec_lvsr:
15195   case PPC::BI__builtin_vsx_lxvd2x:
15196   case PPC::BI__builtin_vsx_lxvw4x:
15197   case PPC::BI__builtin_vsx_lxvd2x_be:
15198   case PPC::BI__builtin_vsx_lxvw4x_be:
15199   case PPC::BI__builtin_vsx_lxvl:
  case PPC::BI__builtin_vsx_lxvll: {
    if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
        BuiltinID == PPC::BI__builtin_vsx_lxvll) {
      Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
    } else {
15206       Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
15207       Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
15208       Ops.pop_back();
15209     }
15210 
15211     switch (BuiltinID) {
15212     default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
15213     case PPC::BI__builtin_altivec_lvx:
15214       ID = Intrinsic::ppc_altivec_lvx;
15215       break;
15216     case PPC::BI__builtin_altivec_lvxl:
15217       ID = Intrinsic::ppc_altivec_lvxl;
15218       break;
15219     case PPC::BI__builtin_altivec_lvebx:
15220       ID = Intrinsic::ppc_altivec_lvebx;
15221       break;
15222     case PPC::BI__builtin_altivec_lvehx:
15223       ID = Intrinsic::ppc_altivec_lvehx;
15224       break;
15225     case PPC::BI__builtin_altivec_lvewx:
15226       ID = Intrinsic::ppc_altivec_lvewx;
15227       break;
15228     case PPC::BI__builtin_altivec_lvsl:
15229       ID = Intrinsic::ppc_altivec_lvsl;
15230       break;
15231     case PPC::BI__builtin_altivec_lvsr:
15232       ID = Intrinsic::ppc_altivec_lvsr;
15233       break;
15234     case PPC::BI__builtin_vsx_lxvd2x:
15235       ID = Intrinsic::ppc_vsx_lxvd2x;
15236       break;
15237     case PPC::BI__builtin_vsx_lxvw4x:
15238       ID = Intrinsic::ppc_vsx_lxvw4x;
15239       break;
15240     case PPC::BI__builtin_vsx_lxvd2x_be:
15241       ID = Intrinsic::ppc_vsx_lxvd2x_be;
15242       break;
15243     case PPC::BI__builtin_vsx_lxvw4x_be:
15244       ID = Intrinsic::ppc_vsx_lxvw4x_be;
15245       break;
15246     case PPC::BI__builtin_vsx_lxvl:
15247       ID = Intrinsic::ppc_vsx_lxvl;
15248       break;
15249     case PPC::BI__builtin_vsx_lxvll:
15250       ID = Intrinsic::ppc_vsx_lxvll;
15251       break;
15252     }
15253     llvm::Function *F = CGM.getIntrinsic(ID);
15254     return Builder.CreateCall(F, Ops, "");
15255   }
15256 
15257   // vec_st, vec_xst_be
15258   case PPC::BI__builtin_altivec_stvx:
15259   case PPC::BI__builtin_altivec_stvxl:
15260   case PPC::BI__builtin_altivec_stvebx:
15261   case PPC::BI__builtin_altivec_stvehx:
15262   case PPC::BI__builtin_altivec_stvewx:
15263   case PPC::BI__builtin_vsx_stxvd2x:
15264   case PPC::BI__builtin_vsx_stxvw4x:
15265   case PPC::BI__builtin_vsx_stxvd2x_be:
15266   case PPC::BI__builtin_vsx_stxvw4x_be:
15267   case PPC::BI__builtin_vsx_stxvl:
  case PPC::BI__builtin_vsx_stxvll: {
    if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
        BuiltinID == PPC::BI__builtin_vsx_stxvll) {
      Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
    } else {
15274       Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
15275       Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
15276       Ops.pop_back();
15277     }
15278 
15279     switch (BuiltinID) {
15280     default: llvm_unreachable("Unsupported st intrinsic!");
15281     case PPC::BI__builtin_altivec_stvx:
15282       ID = Intrinsic::ppc_altivec_stvx;
15283       break;
15284     case PPC::BI__builtin_altivec_stvxl:
15285       ID = Intrinsic::ppc_altivec_stvxl;
15286       break;
15287     case PPC::BI__builtin_altivec_stvebx:
15288       ID = Intrinsic::ppc_altivec_stvebx;
15289       break;
15290     case PPC::BI__builtin_altivec_stvehx:
15291       ID = Intrinsic::ppc_altivec_stvehx;
15292       break;
15293     case PPC::BI__builtin_altivec_stvewx:
15294       ID = Intrinsic::ppc_altivec_stvewx;
15295       break;
15296     case PPC::BI__builtin_vsx_stxvd2x:
15297       ID = Intrinsic::ppc_vsx_stxvd2x;
15298       break;
15299     case PPC::BI__builtin_vsx_stxvw4x:
15300       ID = Intrinsic::ppc_vsx_stxvw4x;
15301       break;
15302     case PPC::BI__builtin_vsx_stxvd2x_be:
15303       ID = Intrinsic::ppc_vsx_stxvd2x_be;
15304       break;
15305     case PPC::BI__builtin_vsx_stxvw4x_be:
15306       ID = Intrinsic::ppc_vsx_stxvw4x_be;
15307       break;
15308     case PPC::BI__builtin_vsx_stxvl:
15309       ID = Intrinsic::ppc_vsx_stxvl;
15310       break;
15311     case PPC::BI__builtin_vsx_stxvll:
15312       ID = Intrinsic::ppc_vsx_stxvll;
15313       break;
15314     }
15315     llvm::Function *F = CGM.getIntrinsic(ID);
15316     return Builder.CreateCall(F, Ops, "");
15317   }
15318   case PPC::BI__builtin_vsx_ldrmb: {
    // This boils down to an unaligned VMX load sequence that avoids crossing
    // a page boundary, followed by a shuffle that moves the loaded bytes
    // into the right side of the vector register.
15322     int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
15323     llvm::Type *ResTy = ConvertType(E->getType());
15324     bool IsLE = getTarget().isLittleEndian();
15325 
15326     // If the user wants the entire vector, just load the entire vector.
15327     if (NumBytes == 16) {
15328       Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo());
15329       Value *LD = Builder.CreateLoad(Address(BC, CharUnits::fromQuantity(1)));
15330       if (!IsLE)
15331         return LD;
15332 
15333       // Reverse the bytes on LE.
15334       SmallVector<int, 16> RevMask;
15335       for (int Idx = 0; Idx < 16; Idx++)
15336         RevMask.push_back(15 - Idx);
15337       return Builder.CreateShuffleVector(LD, LD, RevMask);
15338     }
15339 
15340     llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx);
15341     llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr
15342                                                 : Intrinsic::ppc_altivec_lvsl);
15343     llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm);
15344     Value *HiMem = Builder.CreateGEP(
15345         Int8Ty, Ops[0], ConstantInt::get(Ops[1]->getType(), NumBytes - 1));
15346     Value *LoLd = Builder.CreateCall(Lvx, Ops[0], "ld.lo");
15347     Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi");
15348     Value *Mask1 = Builder.CreateCall(Lvs, Ops[0], "mask1");
15349 
15350     Ops.clear();
15351     Ops.push_back(IsLE ? HiLd : LoLd);
15352     Ops.push_back(IsLE ? LoLd : HiLd);
15353     Ops.push_back(Mask1);
15354     Value *AllElts = Builder.CreateCall(Vperm, Ops, "shuffle1");
    Constant *Zero =
        llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
15356 
15357     if (IsLE) {
15358       SmallVector<int, 16> Consts;
15359       for (int Idx = 0; Idx < 16; Idx++) {
15360         int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1)
15361                                             : 16 - (NumBytes - Idx);
15362         Consts.push_back(Val);
15363       }
15364       return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy),
15365                                          Zero, Consts);
15366     }
15367     SmallVector<Constant *, 16> Consts;
15368     for (int Idx = 0; Idx < 16; Idx++)
15369       Consts.push_back(Builder.getInt8(NumBytes + Idx));
15370     Value *Mask2 = ConstantVector::get(Consts);
15371     return Builder.CreateBitCast(
15372         Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy);
15373   }
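  // Store a 1-16 byte quantity from the right end of a vector register by
  // decomposing it into power-of-two sub-stores (8/4/2/1 bytes), with each
  // element byte-swapped on little-endian targets.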
15374   case PPC::BI__builtin_vsx_strmb: {
15375     int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
15376     bool IsLE = getTarget().isLittleEndian();
15377     auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) {
      // When storing the whole vector, store it directly on BE; on LE,
      // reverse the bytes and then store.
15380       if (Width == 16) {
15381         Value *BC =
15382             Builder.CreateBitCast(Ops[0], Ops[2]->getType()->getPointerTo());
15383         Value *StVec = Ops[2];
15384         if (IsLE) {
15385           SmallVector<int, 16> RevMask;
15386           for (int Idx = 0; Idx < 16; Idx++)
15387             RevMask.push_back(15 - Idx);
15388           StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask);
15389         }
15390         return Builder.CreateStore(StVec,
15391                                    Address(BC, CharUnits::fromQuantity(1)));
15392       }
15393       auto *ConvTy = Int64Ty;
15394       unsigned NumElts = 0;
15395       switch (Width) {
15396       default:
15397         llvm_unreachable("width for stores must be a power of 2");
15398       case 8:
15399         ConvTy = Int64Ty;
15400         NumElts = 2;
15401         break;
15402       case 4:
15403         ConvTy = Int32Ty;
15404         NumElts = 4;
15405         break;
15406       case 2:
15407         ConvTy = Int16Ty;
15408         NumElts = 8;
15409         break;
15410       case 1:
15411         ConvTy = Int8Ty;
15412         NumElts = 16;
15413         break;
15414       }
15415       Value *Vec = Builder.CreateBitCast(
15416           Ops[2], llvm::FixedVectorType::get(ConvTy, NumElts));
15417       Value *Ptr = Builder.CreateGEP(Int8Ty, Ops[0],
15418                                      ConstantInt::get(Int64Ty, Offset));
15419       Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo());
15420       Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
15421       if (IsLE && Width > 1) {
15422         Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
15423         Elt = Builder.CreateCall(F, Elt);
15424       }
15425       return Builder.CreateStore(Elt,
15426                                  Address(PtrBC, CharUnits::fromQuantity(1)));
15427     };
15428     unsigned Stored = 0;
15429     unsigned RemainingBytes = NumBytes;
15430     Value *Result;
15431     if (NumBytes == 16)
15432       return StoreSubVec(16, 0, 0);
15433     if (NumBytes >= 8) {
15434       Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1);
15435       RemainingBytes -= 8;
15436       Stored += 8;
15437     }
15438     if (RemainingBytes >= 4) {
15439       Result = StoreSubVec(4, NumBytes - Stored - 4,
15440                            IsLE ? (Stored >> 2) : 3 - (Stored >> 2));
15441       RemainingBytes -= 4;
15442       Stored += 4;
15443     }
15444     if (RemainingBytes >= 2) {
15445       Result = StoreSubVec(2, NumBytes - Stored - 2,
15446                            IsLE ? (Stored >> 1) : 7 - (Stored >> 1));
15447       RemainingBytes -= 2;
15448       Stored += 2;
15449     }
15450     if (RemainingBytes)
15451       Result =
15452           StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored);
15453     return Result;
15454   }
15455   // Square root
15456   case PPC::BI__builtin_vsx_xvsqrtsp:
15457   case PPC::BI__builtin_vsx_xvsqrtdp: {
15458     llvm::Type *ResultType = ConvertType(E->getType());
15459     Value *X = EmitScalarExpr(E->getArg(0));
15460     if (Builder.getIsFPConstrained()) {
15461       llvm::Function *F = CGM.getIntrinsic(
15462           Intrinsic::experimental_constrained_sqrt, ResultType);
15463       return Builder.CreateConstrainedFPCall(F, X);
15464     } else {
15465       llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15466       return Builder.CreateCall(F, X);
15467     }
15468   }
15469   // Count leading zeros
15470   case PPC::BI__builtin_altivec_vclzb:
15471   case PPC::BI__builtin_altivec_vclzh:
15472   case PPC::BI__builtin_altivec_vclzw:
15473   case PPC::BI__builtin_altivec_vclzd: {
15474     llvm::Type *ResultType = ConvertType(E->getType());
15475     Value *X = EmitScalarExpr(E->getArg(0));
15476     Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15477     Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
15478     return Builder.CreateCall(F, {X, Undef});
15479   }
15480   case PPC::BI__builtin_altivec_vctzb:
15481   case PPC::BI__builtin_altivec_vctzh:
15482   case PPC::BI__builtin_altivec_vctzw:
15483   case PPC::BI__builtin_altivec_vctzd: {
15484     llvm::Type *ResultType = ConvertType(E->getType());
15485     Value *X = EmitScalarExpr(E->getArg(0));
15486     Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
15487     Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
15488     return Builder.CreateCall(F, {X, Undef});
15489   }
15490   case PPC::BI__builtin_altivec_vec_replace_elt:
15491   case PPC::BI__builtin_altivec_vec_replace_unaligned: {
    // The third argument of vec_replace_elt and vec_replace_unaligned must
    // be a compile-time constant and is lowered to either the vinsw or the
    // vinsd instruction.
15495     ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15496     assert(ArgCI &&
15497            "Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
15498     llvm::Type *ResultType = ConvertType(E->getType());
15499     llvm::Function *F = nullptr;
15500     Value *Call = nullptr;
15501     int64_t ConstArg = ArgCI->getSExtValue();
15502     unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
15503     bool Is32Bit = false;
15504     assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
15505     // The input to vec_replace_elt is an element index, not a byte index.
15506     if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
15507       ConstArg *= ArgWidth / 8;
15508     if (ArgWidth == 32) {
15509       Is32Bit = true;
15510       // When the second argument is 32 bits, it can either be an integer or
15511       // a float. The vinsw intrinsic is used in this case.
15512       F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
      // Fix the constant according to endianness.
15514       if (getTarget().isLittleEndian())
15515         ConstArg = 12 - ConstArg;
15516     } else {
15517       // When the second argument is 64 bits, it can either be a long long or
15518       // a double. The vinsd intrinsic is used in this case.
15519       F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
15520       // Fix the constant for little endian.
15521       if (getTarget().isLittleEndian())
15522         ConstArg = 8 - ConstArg;
15523     }
15524     Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
15525     // Depending on ArgWidth, the input vector could be a float or a double.
15526     // If the input vector is a float type, bitcast the inputs to integers. Or,
15527     // if the input vector is a double, bitcast the inputs to 64-bit integers.
15528     if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
15529       Ops[0] = Builder.CreateBitCast(
15530           Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
15531                           : llvm::FixedVectorType::get(Int64Ty, 2));
15532       Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
15533     }
15534     // Emit the call to vinsw or vinsd.
15535     Call = Builder.CreateCall(F, Ops);
    // Depending on the builtin, bitcast to the appropriate result type.
15537     if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
15538         !Ops[1]->getType()->isIntegerTy())
15539       return Builder.CreateBitCast(Call, ResultType);
15540     else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
15541              Ops[1]->getType()->isIntegerTy())
15542       return Call;
15543     else
15544       return Builder.CreateBitCast(Call,
15545                                    llvm::FixedVectorType::get(Int8Ty, 16));
15546   }
15547   case PPC::BI__builtin_altivec_vpopcntb:
15548   case PPC::BI__builtin_altivec_vpopcnth:
15549   case PPC::BI__builtin_altivec_vpopcntw:
15550   case PPC::BI__builtin_altivec_vpopcntd: {
15551     llvm::Type *ResultType = ConvertType(E->getType());
15552     Value *X = EmitScalarExpr(E->getArg(0));
15553     llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
15554     return Builder.CreateCall(F, X);
15555   }
15556   case PPC::BI__builtin_altivec_vadduqm:
15557   case PPC::BI__builtin_altivec_vsubuqm: {
15558     llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
15559     Ops[0] =
15560         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int128Ty, 1));
15561     Ops[1] =
15562         Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int128Ty, 1));
15563     if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
15564       return Builder.CreateAdd(Ops[0], Ops[1], "vadduqm");
15565     else
15566       return Builder.CreateSub(Ops[0], Ops[1], "vsubuqm");
15567   }
15568   // Rotate and insert under mask operation.
15569   // __rldimi(rs, is, shift, mask)
15570   // (rotl64(rs, shift) & mask) | (is & ~mask)
15571   // __rlwimi(rs, is, shift, mask)
15572   // (rotl(rs, shift) & mask) | (is & ~mask)
15573   case PPC::BI__builtin_ppc_rldimi:
15574   case PPC::BI__builtin_ppc_rlwimi: {
15575     llvm::Type *Ty = Ops[0]->getType();
15576     Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
15577     if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
15578       Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
15579     Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[2]});
15580     Value *X = Builder.CreateAnd(Shift, Ops[3]);
15581     Value *Y = Builder.CreateAnd(Ops[1], Builder.CreateNot(Ops[3]));
15582     return Builder.CreateOr(X, Y);
15583   }
15584   // Rotate and insert under mask operation.
15585   // __rlwnm(rs, shift, mask)
15586   // rotl(rs, shift) & mask
15587   case PPC::BI__builtin_ppc_rlwnm: {
15588     llvm::Type *Ty = Ops[0]->getType();
15589     Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
15590     Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[1]});
15591     return Builder.CreateAnd(Shift, Ops[2]);
15592   }
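  // Parity of the popcount: count the set bits, then keep the low bit.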
15593   case PPC::BI__builtin_ppc_poppar4:
15594   case PPC::BI__builtin_ppc_poppar8: {
15595     llvm::Type *ArgType = Ops[0]->getType();
15596     Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
15597     Value *Tmp = Builder.CreateCall(F, Ops[0]);
15598 
15599     llvm::Type *ResultType = ConvertType(E->getType());
15600     Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
15601     if (Result->getType() != ResultType)
15602       Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
15603                                      "cast");
15604     return Result;
15605   }
15606   case PPC::BI__builtin_ppc_cmpb: {
15607     if (getTarget().getTriple().isPPC64()) {
15608       Function *F =
15609           CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty});
15610       return Builder.CreateCall(F, Ops, "cmpb");
15611     }
    // For 32-bit targets, emit the code below:
15613     // %conv = trunc i64 %a to i32
15614     // %conv1 = trunc i64 %b to i32
15615     // %shr = lshr i64 %a, 32
15616     // %conv2 = trunc i64 %shr to i32
15617     // %shr3 = lshr i64 %b, 32
15618     // %conv4 = trunc i64 %shr3 to i32
15619     // %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1)
15620     // %conv5 = zext i32 %0 to i64
15621     // %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4)
15622     // %conv614 = zext i32 %1 to i64
15623     // %shl = shl nuw i64 %conv614, 32
15624     // %or = or i64 %shl, %conv5
15625     // ret i64 %or
15626     Function *F =
15627         CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty});
15628     Value *ArgOneLo = Builder.CreateTrunc(Ops[0], Int32Ty);
15629     Value *ArgTwoLo = Builder.CreateTrunc(Ops[1], Int32Ty);
15630     Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32);
15631     Value *ArgOneHi =
15632         Builder.CreateTrunc(Builder.CreateLShr(Ops[0], ShiftAmt), Int32Ty);
15633     Value *ArgTwoHi =
15634         Builder.CreateTrunc(Builder.CreateLShr(Ops[1], ShiftAmt), Int32Ty);
15635     Value *ResLo = Builder.CreateZExt(
15636         Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty);
15637     Value *ResHiShift = Builder.CreateZExt(
15638         Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty);
15639     Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt);
15640     return Builder.CreateOr(ResLo, ResHi);
15641   }
15642   // Copy sign
15643   case PPC::BI__builtin_vsx_xvcpsgnsp:
15644   case PPC::BI__builtin_vsx_xvcpsgndp: {
15645     llvm::Type *ResultType = ConvertType(E->getType());
15646     Value *X = EmitScalarExpr(E->getArg(0));
15647     Value *Y = EmitScalarExpr(E->getArg(1));
15648     ID = Intrinsic::copysign;
15649     llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
15650     return Builder.CreateCall(F, {X, Y});
15651   }
15652   // Rounding/truncation
15653   case PPC::BI__builtin_vsx_xvrspip:
15654   case PPC::BI__builtin_vsx_xvrdpip:
15655   case PPC::BI__builtin_vsx_xvrdpim:
15656   case PPC::BI__builtin_vsx_xvrspim:
15657   case PPC::BI__builtin_vsx_xvrdpi:
15658   case PPC::BI__builtin_vsx_xvrspi:
15659   case PPC::BI__builtin_vsx_xvrdpic:
15660   case PPC::BI__builtin_vsx_xvrspic:
15661   case PPC::BI__builtin_vsx_xvrdpiz:
15662   case PPC::BI__builtin_vsx_xvrspiz: {
15663     llvm::Type *ResultType = ConvertType(E->getType());
15664     Value *X = EmitScalarExpr(E->getArg(0));
15665     if (BuiltinID == PPC::BI__builtin_vsx_xvrdpim ||
15666         BuiltinID == PPC::BI__builtin_vsx_xvrspim)
15667       ID = Builder.getIsFPConstrained()
15668                ? Intrinsic::experimental_constrained_floor
15669                : Intrinsic::floor;
15670     else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpi ||
15671              BuiltinID == PPC::BI__builtin_vsx_xvrspi)
15672       ID = Builder.getIsFPConstrained()
15673                ? Intrinsic::experimental_constrained_round
15674                : Intrinsic::round;
15675     else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpic ||
15676              BuiltinID == PPC::BI__builtin_vsx_xvrspic)
15677       ID = Builder.getIsFPConstrained()
15678                ? Intrinsic::experimental_constrained_rint
15679                : Intrinsic::rint;
15680     else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpip ||
15681              BuiltinID == PPC::BI__builtin_vsx_xvrspip)
15682       ID = Builder.getIsFPConstrained()
15683                ? Intrinsic::experimental_constrained_ceil
15684                : Intrinsic::ceil;
15685     else if (BuiltinID == PPC::BI__builtin_vsx_xvrdpiz ||
15686              BuiltinID == PPC::BI__builtin_vsx_xvrspiz)
15687       ID = Builder.getIsFPConstrained()
15688                ? Intrinsic::experimental_constrained_trunc
15689                : Intrinsic::trunc;
15690     llvm::Function *F = CGM.getIntrinsic(ID, ResultType);
15691     return Builder.getIsFPConstrained() ? Builder.CreateConstrainedFPCall(F, X)
15692                                         : Builder.CreateCall(F, X);
15693   }
15694 
15695   // Absolute value
15696   case PPC::BI__builtin_vsx_xvabsdp:
15697   case PPC::BI__builtin_vsx_xvabssp: {
15698     llvm::Type *ResultType = ConvertType(E->getType());
15699     Value *X = EmitScalarExpr(E->getArg(0));
15700     llvm::Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
15701     return Builder.CreateCall(F, X);
15702   }
15703 
15704   // Fastmath by default
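  // These builtins force fast-math flags on for the instructions they emit
  // (an fdiv for recipdiv, 1.0 / sqrt(x) for rsqrt) and restore the builder's
  // previous flags afterwards.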
15705   case PPC::BI__builtin_ppc_recipdivf:
15706   case PPC::BI__builtin_ppc_recipdivd:
15707   case PPC::BI__builtin_ppc_rsqrtf:
15708   case PPC::BI__builtin_ppc_rsqrtd: {
15709     FastMathFlags FMF = Builder.getFastMathFlags();
15710     Builder.getFastMathFlags().setFast();
15711     llvm::Type *ResultType = ConvertType(E->getType());
15712     Value *X = EmitScalarExpr(E->getArg(0));
15713 
15714     if (BuiltinID == PPC::BI__builtin_ppc_recipdivf ||
15715         BuiltinID == PPC::BI__builtin_ppc_recipdivd) {
15716       Value *Y = EmitScalarExpr(E->getArg(1));
15717       Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv");
15718       Builder.getFastMathFlags() &= (FMF);
15719       return FDiv;
15720     }
15721     auto *One = ConstantFP::get(ResultType, 1.0);
15722     llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
15723     Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt");
15724     Builder.getFastMathFlags() &= (FMF);
15725     return FDiv;
15726   }
15727   case PPC::BI__builtin_ppc_alignx: {
15728     ConstantInt *AlignmentCI = cast<ConstantInt>(Ops[0]);
15729     if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
15730       AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
15731                                      llvm::Value::MaximumAlignment);
15732 
15733     emitAlignmentAssumption(Ops[1], E->getArg(1),
15734                             /*The expr loc is sufficient.*/ SourceLocation(),
15735                             AlignmentCI, nullptr);
15736     return Ops[1];
15737   }
15738   case PPC::BI__builtin_ppc_rdlam: {
15739     llvm::Type *Ty = Ops[0]->getType();
15740     Value *ShiftAmt = Builder.CreateIntCast(Ops[1], Ty, false);
15741     Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
15742     Value *Rotate = Builder.CreateCall(F, {Ops[0], Ops[0], ShiftAmt});
15743     return Builder.CreateAnd(Rotate, Ops[2]);
15744   }
15745   case PPC::BI__builtin_ppc_load2r: {
15746     Function *F = CGM.getIntrinsic(Intrinsic::ppc_load2r);
15747     Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
15748     Value *LoadIntrinsic = Builder.CreateCall(F, Ops);
15749     return Builder.CreateTrunc(LoadIntrinsic, Int16Ty);
15750   }
15751   // FMA variations
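  // xvmadd  -> fma(X, Y, Z)        xvmsub  -> fma(X, Y, -Z)
  // xvnmadd -> -fma(X, Y, Z)       xvnmsub -> -fma(X, Y, -Z)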
15752   case PPC::BI__builtin_vsx_xvmaddadp:
15753   case PPC::BI__builtin_vsx_xvmaddasp:
15754   case PPC::BI__builtin_vsx_xvnmaddadp:
15755   case PPC::BI__builtin_vsx_xvnmaddasp:
15756   case PPC::BI__builtin_vsx_xvmsubadp:
15757   case PPC::BI__builtin_vsx_xvmsubasp:
15758   case PPC::BI__builtin_vsx_xvnmsubadp:
15759   case PPC::BI__builtin_vsx_xvnmsubasp: {
15760     llvm::Type *ResultType = ConvertType(E->getType());
15761     Value *X = EmitScalarExpr(E->getArg(0));
15762     Value *Y = EmitScalarExpr(E->getArg(1));
15763     Value *Z = EmitScalarExpr(E->getArg(2));
15764     llvm::Function *F;
15765     if (Builder.getIsFPConstrained())
15766       F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, ResultType);
15767     else
15768       F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
15769     switch (BuiltinID) {
15770       case PPC::BI__builtin_vsx_xvmaddadp:
15771       case PPC::BI__builtin_vsx_xvmaddasp:
15772         if (Builder.getIsFPConstrained())
15773           return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
15774         else
15775           return Builder.CreateCall(F, {X, Y, Z});
15776       case PPC::BI__builtin_vsx_xvnmaddadp:
15777       case PPC::BI__builtin_vsx_xvnmaddasp:
15778         if (Builder.getIsFPConstrained())
15779           return Builder.CreateFNeg(
15780               Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
15781         else
15782           return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
15783       case PPC::BI__builtin_vsx_xvmsubadp:
15784       case PPC::BI__builtin_vsx_xvmsubasp:
15785         if (Builder.getIsFPConstrained())
15786           return Builder.CreateConstrainedFPCall(
15787               F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15788         else
15789           return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
15790       case PPC::BI__builtin_vsx_xvnmsubadp:
15791       case PPC::BI__builtin_vsx_xvnmsubasp:
15792         if (Builder.getIsFPConstrained())
15793           return Builder.CreateFNeg(
15794               Builder.CreateConstrainedFPCall(
15795                   F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
15796               "neg");
15797         else
15798           return Builder.CreateFNeg(
15799               Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
15800               "neg");
15801     }
15802     llvm_unreachable("Unknown FMA operation");
15803     return nullptr; // Suppress no-return warning
15804   }
15805 
15806   case PPC::BI__builtin_vsx_insertword: {
15807     llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
15808 
    // The third argument is a compile-time constant int. It must be clamped
    // to the range [0, 12].
15811     ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15812     assert(ArgCI &&
15813            "Third arg to xxinsertw intrinsic must be constant integer");
15814     const int64_t MaxIndex = 12;
15815     int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
15816 
    // The builtin semantics don't exactly match the xxinsertw instruction's
    // semantics (which ppc_vsx_xxinsertw follows). The builtin extracts the
    // word from the first argument and inserts it into the second argument.
    // The instruction extracts the word from its second input register and
    // inserts it into its first input register, so swap the first and second
    // arguments.
15822     std::swap(Ops[0], Ops[1]);
15823 
15824     // Need to cast the second argument from a vector of unsigned int to a
15825     // vector of long long.
15826     Ops[1] =
15827         Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
15828 
15829     if (getTarget().isLittleEndian()) {
15830       // Reverse the double words in the vector we will extract from.
15831       Ops[0] =
15832           Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
15833       Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
15834 
15835       // Reverse the index.
15836       Index = MaxIndex - Index;
15837     }
15838 
15839     // Intrinsic expects the first arg to be a vector of int.
15840     Ops[0] =
15841         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
15842     Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
15843     return Builder.CreateCall(F, Ops);
15844   }
15845 
15846   case PPC::BI__builtin_vsx_extractuword: {
15847     llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
15848 
15849     // Intrinsic expects the first argument to be a vector of doublewords.
15850     Ops[0] =
15851         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
15852 
    // The second argument is a compile-time constant int that needs to be
    // clamped to the range [0, 12].
15855     ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
15856     assert(ArgCI &&
15857            "Second Arg to xxextractuw intrinsic must be a constant integer!");
15858     const int64_t MaxIndex = 12;
15859     int64_t Index = clamp(ArgCI->getSExtValue(), 0, MaxIndex);
15860 
15861     if (getTarget().isLittleEndian()) {
15862       // Reverse the index.
15863       Index = MaxIndex - Index;
15864       Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
15865 
      // Emit the call, then reverse the double words of the result vector.
15867       Value *Call = Builder.CreateCall(F, Ops);
15868 
15869       Value *ShuffleCall =
15870           Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
15871       return ShuffleCall;
15872     } else {
15873       Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
15874       return Builder.CreateCall(F, Ops);
15875     }
15876   }
15877 
15878   case PPC::BI__builtin_vsx_xxpermdi: {
15879     ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15880     assert(ArgCI && "Third arg must be constant integer!");
15881 
15882     unsigned Index = ArgCI->getZExtValue();
15883     Ops[0] =
15884         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
15885     Ops[1] =
15886         Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
15887 
15888     // Account for endianness by treating this as just a shuffle. So we use the
15889     // same indices for both LE and BE in order to produce expected results in
15890     // both cases.
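    // For example, Index == 0 selects doubleword 0 of each input (shuffle
    // mask {0, 2}); Index == 3 selects doubleword 1 of each (mask {1, 3}).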
15891     int ElemIdx0 = (Index & 2) >> 1;
15892     int ElemIdx1 = 2 + (Index & 1);
15893 
15894     int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
15895     Value *ShuffleCall =
15896         Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
15897     QualType BIRetType = E->getType();
15898     auto RetTy = ConvertType(BIRetType);
15899     return Builder.CreateBitCast(ShuffleCall, RetTy);
15900   }
15901 
15902   case PPC::BI__builtin_vsx_xxsldwi: {
15903     ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
15904     assert(ArgCI && "Third argument must be a compile time constant");
15905     unsigned Index = ArgCI->getZExtValue() & 0x3;
15906     Ops[0] =
15907         Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
15908     Ops[1] =
15909         Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
15910 
15911     // Create a shuffle mask
15912     int ElemIdx0;
15913     int ElemIdx1;
15914     int ElemIdx2;
15915     int ElemIdx3;
15916     if (getTarget().isLittleEndian()) {
15917       // Little endian element N comes from element 8+N-Index of the
15918       // concatenated wide vector (of course, using modulo arithmetic on
15919       // the total number of elements).
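      // For example, Index == 1 yields the shuffle mask {7, 0, 1, 2}.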
15920       ElemIdx0 = (8 - Index) % 8;
15921       ElemIdx1 = (9 - Index) % 8;
15922       ElemIdx2 = (10 - Index) % 8;
15923       ElemIdx3 = (11 - Index) % 8;
15924     } else {
15925       // Big endian ElemIdx<N> = Index + N
15926       ElemIdx0 = Index;
15927       ElemIdx1 = Index + 1;
15928       ElemIdx2 = Index + 2;
15929       ElemIdx3 = Index + 3;
15930     }
15931 
15932     int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
15933     Value *ShuffleCall =
15934         Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
15935     QualType BIRetType = E->getType();
15936     auto RetTy = ConvertType(BIRetType);
15937     return Builder.CreateBitCast(ShuffleCall, RetTy);
15938   }
15939 
15940   case PPC::BI__builtin_pack_vector_int128: {
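    // The first argument becomes the most significant doubleword of the
    // packed result, which is vector element 0 on BE and element 1 on LE.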
15941     bool isLittleEndian = getTarget().isLittleEndian();
15942     Value *UndefValue =
15943         llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
15944     Value *Res = Builder.CreateInsertElement(
15945         UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
15946     Res = Builder.CreateInsertElement(Res, Ops[1],
15947                                       (uint64_t)(isLittleEndian ? 0 : 1));
15948     return Builder.CreateBitCast(Res, ConvertType(E->getType()));
15949   }
15950 
15951   case PPC::BI__builtin_unpack_vector_int128: {
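    // The index selects doublewords in big-endian order: index 0 names the
    // most significant doubleword, which is vector element 1 on LE, hence
    // the index flip below.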
15952     ConstantInt *Index = cast<ConstantInt>(Ops[1]);
15953     Value *Unpacked = Builder.CreateBitCast(
15954         Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
15955 
15956     if (getTarget().isLittleEndian())
15957       Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
15958 
15959     return Builder.CreateExtractElement(Unpacked, Index);
15960   }
15961 
15962   case PPC::BI__builtin_ppc_sthcx: {
15963     llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx);
15964     Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
15965     Ops[1] = Builder.CreateSExt(Ops[1], Int32Ty);
15966     return Builder.CreateCall(F, Ops);
15967   }
15968 
15969   // The PPC MMA builtins take a pointer to a __vector_quad as an argument.
15970   // Some of the MMA instructions accumulate their result into an existing
15971   // accumulator whereas the others generate a new accumulator. So we need to
15972   // use custom code generation to expand a builtin call with a pointer to a
15973   // load (if the corresponding instruction accumulates its result) followed by
15974   // the call to the intrinsic and a store of the result.
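  // For example, an accumulating builtin such as __builtin_mma_xvf32gerpp
  // expands to a load of the accumulator, the intrinsic call, and a store of
  // the result, while a non-accumulating one such as __builtin_mma_xvf32ger
  // needs only the call and the store.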
15975 #define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \
15976   case PPC::BI__builtin_##Name:
15977 #include "clang/Basic/BuiltinsPPC.def"
15978   {
    // The first argument of these builtins is a pointer used to store their
    // result. However, the LLVM intrinsics return their result as multiple
    // return values. So, here we emit code extracting those values from the
    // intrinsic result and storing them through that pointer.
15983     if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
15984         BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
15985         BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
15986       unsigned NumVecs = 2;
15987       auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
15988       if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
15989         NumVecs = 4;
15990         Intrinsic = Intrinsic::ppc_mma_disassemble_acc;
15991       }
15992       llvm::Function *F = CGM.getIntrinsic(Intrinsic);
15993       Address Addr = EmitPointerWithAlignment(E->getArg(1));
15994       Value *Vec = Builder.CreateLoad(Addr);
15995       Value *Call = Builder.CreateCall(F, {Vec});
15996       llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, 16);
15997       Value *Ptr = Builder.CreateBitCast(Ops[0], VTy->getPointerTo());
15998       for (unsigned i=0; i<NumVecs; i++) {
15999         Value *Vec = Builder.CreateExtractValue(Call, i);
16000         llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
16001         Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
16002         Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
16003       }
16004       return Call;
16005     }
16006     if (BuiltinID == PPC::BI__builtin_vsx_build_pair ||
16007         BuiltinID == PPC::BI__builtin_mma_build_acc) {
      // Reverse the order of the operands for LE, so the same builtin call
      // can be used on both LE and BE without requiring the programmer to
      // swap operands. The operands are reversed starting from the second
      // argument; the first operand is the pointer to the pair/accumulator
      // being built.
16014       if (getTarget().isLittleEndian())
16015         std::reverse(Ops.begin() + 1, Ops.end());
16016     }
16017     bool Accumulate;
16018     switch (BuiltinID) {
16019   #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
16020     case PPC::BI__builtin_##Name: \
16021       ID = Intrinsic::ppc_##Intr; \
16022       Accumulate = Acc; \
16023       break;
16024   #include "clang/Basic/BuiltinsPPC.def"
16025     }
16026     if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
16027         BuiltinID == PPC::BI__builtin_vsx_stxvp ||
16028         BuiltinID == PPC::BI__builtin_mma_lxvp ||
16029         BuiltinID == PPC::BI__builtin_mma_stxvp) {
16030       if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
16031           BuiltinID == PPC::BI__builtin_mma_lxvp) {
16032         Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
16033         Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
16034       } else {
16035         Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
16036         Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
16037       }
16038       Ops.pop_back();
16039       llvm::Function *F = CGM.getIntrinsic(ID);
16040       return Builder.CreateCall(F, Ops, "");
16041     }
16042     SmallVector<Value*, 4> CallOps;
16043     if (Accumulate) {
16044       Address Addr = EmitPointerWithAlignment(E->getArg(0));
16045       Value *Acc = Builder.CreateLoad(Addr);
16046       CallOps.push_back(Acc);
16047     }
16048     for (unsigned i=1; i<Ops.size(); i++)
16049       CallOps.push_back(Ops[i]);
16050     llvm::Function *F = CGM.getIntrinsic(ID);
16051     Value *Call = Builder.CreateCall(F, CallOps);
16052     return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
16053   }
16054 
16055   case PPC::BI__builtin_ppc_compare_and_swap:
16056   case PPC::BI__builtin_ppc_compare_and_swaplp: {
16057     Address Addr = EmitPointerWithAlignment(E->getArg(0));
16058     Address OldValAddr = EmitPointerWithAlignment(E->getArg(1));
16059     Value *OldVal = Builder.CreateLoad(OldValAddr);
16060     QualType AtomicTy = E->getArg(0)->getType()->getPointeeType();
16061     LValue LV = MakeAddrLValue(Addr, AtomicTy);
16062     auto Pair = EmitAtomicCompareExchange(
16063         LV, RValue::get(OldVal), RValue::get(Ops[2]), E->getExprLoc(),
16064         llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true);
    // Unlike C11's atomic_compare_exchange, according to
    // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
    // > In either case, the contents of the memory location specified by addr
    // > are copied into the memory location specified by old_val_addr.
    // The documentation does not specify whether the store to OldValAddr is
    // atomic or what ordering it should use. Following XL's codegen, treat it
    // as a normal store.
16072     Value *LoadedVal = Pair.first.getScalarVal();
16073     Builder.CreateStore(LoadedVal, OldValAddr);
16074     return Builder.CreateZExt(Pair.second, Builder.getInt32Ty());
16075   }
16076   case PPC::BI__builtin_ppc_fetch_and_add:
16077   case PPC::BI__builtin_ppc_fetch_and_addlp: {
16078     return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
16079                                  llvm::AtomicOrdering::Monotonic);
16080   }
16081   case PPC::BI__builtin_ppc_fetch_and_and:
16082   case PPC::BI__builtin_ppc_fetch_and_andlp: {
16083     return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
16084                                  llvm::AtomicOrdering::Monotonic);
16085   }
16086 
16087   case PPC::BI__builtin_ppc_fetch_and_or:
16088   case PPC::BI__builtin_ppc_fetch_and_orlp: {
16089     return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
16090                                  llvm::AtomicOrdering::Monotonic);
16091   }
16092   case PPC::BI__builtin_ppc_fetch_and_swap:
16093   case PPC::BI__builtin_ppc_fetch_and_swaplp: {
16094     return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
16095                                  llvm::AtomicOrdering::Monotonic);
16096   }
16097   case PPC::BI__builtin_ppc_ldarx:
16098   case PPC::BI__builtin_ppc_lwarx:
16099   case PPC::BI__builtin_ppc_lharx:
16100   case PPC::BI__builtin_ppc_lbarx:
16101     return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E);
16102   case PPC::BI__builtin_ppc_mfspr: {
16103     llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
16104                               ? Int32Ty
16105                               : Int64Ty;
16106     Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType);
16107     return Builder.CreateCall(F, Ops);
16108   }
16109   case PPC::BI__builtin_ppc_mtspr: {
16110     llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
16111                               ? Int32Ty
16112                               : Int64Ty;
16113     Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType);
16114     return Builder.CreateCall(F, Ops);
16115   }
16116   case PPC::BI__builtin_ppc_popcntb: {
16117     Value *ArgValue = EmitScalarExpr(E->getArg(0));
16118     llvm::Type *ArgType = ArgValue->getType();
16119     Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType});
16120     return Builder.CreateCall(F, Ops, "popcntb");
16121   }
16122   case PPC::BI__builtin_ppc_mtfsf: {
16123     // The builtin takes a uint32 that needs to be cast to an
16124     // f64 to be passed to the intrinsic.
16125     Value *Cast = Builder.CreateUIToFP(Ops[1], DoubleTy);
16126     llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf);
16127     return Builder.CreateCall(F, {Ops[0], Cast}, "");
16128   }
16129 
16130   case PPC::BI__builtin_ppc_swdiv_nochk:
16131   case PPC::BI__builtin_ppc_swdivs_nochk: {
16132     FastMathFlags FMF = Builder.getFastMathFlags();
16133     Builder.getFastMathFlags().setFast();
16134     Value *FDiv = Builder.CreateFDiv(Ops[0], Ops[1], "swdiv_nochk");
16135     Builder.getFastMathFlags() &= (FMF);
16136     return FDiv;
16137   }
16138   case PPC::BI__builtin_ppc_fric:
16139     return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16140                            *this, E, Intrinsic::rint,
16141                            Intrinsic::experimental_constrained_rint))
16142         .getScalarVal();
16143   case PPC::BI__builtin_ppc_frim:
16144   case PPC::BI__builtin_ppc_frims:
16145     return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16146                            *this, E, Intrinsic::floor,
16147                            Intrinsic::experimental_constrained_floor))
16148         .getScalarVal();
16149   case PPC::BI__builtin_ppc_frin:
16150   case PPC::BI__builtin_ppc_frins:
16151     return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16152                            *this, E, Intrinsic::round,
16153                            Intrinsic::experimental_constrained_round))
16154         .getScalarVal();
16155   case PPC::BI__builtin_ppc_frip:
16156   case PPC::BI__builtin_ppc_frips:
16157     return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16158                            *this, E, Intrinsic::ceil,
16159                            Intrinsic::experimental_constrained_ceil))
16160         .getScalarVal();
16161   case PPC::BI__builtin_ppc_friz:
16162   case PPC::BI__builtin_ppc_frizs:
16163     return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16164                            *this, E, Intrinsic::trunc,
16165                            Intrinsic::experimental_constrained_trunc))
16166         .getScalarVal();
16167   case PPC::BI__builtin_ppc_fsqrt:
16168   case PPC::BI__builtin_ppc_fsqrts:
16169     return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
16170                            *this, E, Intrinsic::sqrt,
16171                            Intrinsic::experimental_constrained_sqrt))
16172         .getScalarVal();
16173   case PPC::BI__builtin_ppc_test_data_class: {
16174     llvm::Type *ArgType = EmitScalarExpr(E->getArg(0))->getType();
16175     unsigned IntrinsicID;
16176     if (ArgType->isDoubleTy())
16177       IntrinsicID = Intrinsic::ppc_test_data_class_d;
16178     else if (ArgType->isFloatTy())
16179       IntrinsicID = Intrinsic::ppc_test_data_class_f;
16180     else
16181       llvm_unreachable("Invalid Argument Type");
16182     return Builder.CreateCall(CGM.getIntrinsic(IntrinsicID), Ops,
16183                               "test_data_class");
16184   }
16185   case PPC::BI__builtin_ppc_swdiv:
16186   case PPC::BI__builtin_ppc_swdivs:
16187     return Builder.CreateFDiv(Ops[0], Ops[1], "swdiv");
16188   }
16189 }
16190 
16191 namespace {
// If \p E is not a null pointer, insert an address space cast to match the
// return type of \p E if necessary.
16194 Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
16195                              const CallExpr *E = nullptr) {
16196   auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_dispatch_ptr);
16197   auto *Call = CGF.Builder.CreateCall(F);
16198   Call->addRetAttr(
16199       Attribute::getWithDereferenceableBytes(Call->getContext(), 64));
16200   Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(4)));
16201   if (!E)
16202     return Call;
16203   QualType BuiltinRetType = E->getType();
16204   auto *RetTy = cast<llvm::PointerType>(CGF.ConvertType(BuiltinRetType));
16205   if (RetTy == Call->getType())
16206     return Call;
16207   return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
16208 }
16209 
// \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
16211 Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
16212   const unsigned XOffset = 4;
16213   auto *DP = EmitAMDGPUDispatchPtr(CGF);
16214   // Indexing the HSA kernel_dispatch_packet struct.
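  // The uint16_t workgroup_size_{x,y,z} fields of
  // hsa_kernel_dispatch_packet_t live at byte offsets 4, 6, and 8, hence
  // XOffset + Index * 2.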
16215   auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
16216   auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
16217   auto *DstTy =
16218       CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
16219   auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
16220   auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(2)));
16221   llvm::MDBuilder MDHelper(CGF.getLLVMContext());
16222   llvm::MDNode *RNode = MDHelper.createRange(APInt(16, 1),
16223       APInt(16, CGF.getTarget().getMaxOpenCLWorkGroupSize() + 1));
16224   LD->setMetadata(llvm::LLVMContext::MD_range, RNode);
16225   LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
16226       llvm::MDNode::get(CGF.getLLVMContext(), None));
16227   return LD;
16228 }
16229 
// \p Index is 0, 1, and 2 for the x, y, and z dimensions, respectively.
16231 Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
16232   const unsigned XOffset = 12;
16233   auto *DP = EmitAMDGPUDispatchPtr(CGF);
16234   // Indexing the HSA kernel_dispatch_packet struct.
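  // The uint32_t grid_size_{x,y,z} fields of hsa_kernel_dispatch_packet_t
  // live at byte offsets 12, 16, and 20, hence XOffset + Index * 4.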
16235   auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
16236   auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
16237   auto *DstTy =
16238       CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
16239   auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
16240   auto *LD = CGF.Builder.CreateLoad(Address(Cast, CharUnits::fromQuantity(4)));
16241   LD->setMetadata(llvm::LLVMContext::MD_invariant_load,
16242                   llvm::MDNode::get(CGF.getLLVMContext(), None));
16243   return LD;
16244 }
16245 } // namespace
16246 
16247 // For processing memory ordering and memory scope arguments of various
16248 // amdgcn builtins.
// \p Order takes a C++11-compatible memory-ordering specifier and converts
// it into LLVM's memory ordering specifier using the atomic C ABI, writing
// the result to \p AO. \p Scope takes a const char * and converts it into an
// AMDGCN-specific SyncScopeID, which it writes to \p SSID.
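// For example, __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup") reaches
// this point with \p Order equal to 5 (the C ABI value of seq_cst) and
// \p Scope pointing at the string "workgroup".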
16253 bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
16254                                               llvm::AtomicOrdering &AO,
16255                                               llvm::SyncScope::ID &SSID) {
16256   if (isa<llvm::ConstantInt>(Order)) {
16257     int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
16258 
16259     // Map C11/C++11 memory ordering to LLVM memory ordering
16260     assert(llvm::isValidAtomicOrderingCABI(ord));
16261     switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
16262     case llvm::AtomicOrderingCABI::acquire:
16263     case llvm::AtomicOrderingCABI::consume:
16264       AO = llvm::AtomicOrdering::Acquire;
16265       break;
16266     case llvm::AtomicOrderingCABI::release:
16267       AO = llvm::AtomicOrdering::Release;
16268       break;
16269     case llvm::AtomicOrderingCABI::acq_rel:
16270       AO = llvm::AtomicOrdering::AcquireRelease;
16271       break;
16272     case llvm::AtomicOrderingCABI::seq_cst:
16273       AO = llvm::AtomicOrdering::SequentiallyConsistent;
16274       break;
16275     case llvm::AtomicOrderingCABI::relaxed:
16276       AO = llvm::AtomicOrdering::Monotonic;
16277       break;
16278     }
16279 
16280     StringRef scp;
16281     llvm::getConstantStringInfo(Scope, scp);
16282     SSID = getLLVMContext().getOrInsertSyncScopeID(scp);
16283     return true;
16284   }
16285   return false;
16286 }
16287 
16288 Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
16289                                               const CallExpr *E) {
16290   llvm::AtomicOrdering AO = llvm::AtomicOrdering::SequentiallyConsistent;
16291   llvm::SyncScope::ID SSID;
16292   switch (BuiltinID) {
16293   case AMDGPU::BI__builtin_amdgcn_div_scale:
16294   case AMDGPU::BI__builtin_amdgcn_div_scalef: {
    // Translate from the intrinsic's struct return to the builtin's out
    // argument.
16297 
16298     Address FlagOutPtr = EmitPointerWithAlignment(E->getArg(3));
16299 
16300     llvm::Value *X = EmitScalarExpr(E->getArg(0));
16301     llvm::Value *Y = EmitScalarExpr(E->getArg(1));
16302     llvm::Value *Z = EmitScalarExpr(E->getArg(2));
16303 
16304     llvm::Function *Callee = CGM.getIntrinsic(Intrinsic::amdgcn_div_scale,
16305                                            X->getType());
16306 
16307     llvm::Value *Tmp = Builder.CreateCall(Callee, {X, Y, Z});
16308 
16309     llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
16310     llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
16311 
16312     llvm::Type *RealFlagType
16313       = FlagOutPtr.getPointer()->getType()->getPointerElementType();
16314 
16315     llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
16316     Builder.CreateStore(FlagExt, FlagOutPtr);
16317     return Result;
16318   }
16319   case AMDGPU::BI__builtin_amdgcn_div_fmas:
16320   case AMDGPU::BI__builtin_amdgcn_div_fmasf: {
16321     llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16322     llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16323     llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16324     llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
16325 
16326     llvm::Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_div_fmas,
16327                                       Src0->getType());
16328     llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
16329     return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
16330   }
16331 
16332   case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
16333     return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
16334   case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
16335     return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_mov_dpp8);
16336   case AMDGPU::BI__builtin_amdgcn_mov_dpp:
16337   case AMDGPU::BI__builtin_amdgcn_update_dpp: {
16338     llvm::SmallVector<llvm::Value *, 6> Args;
16339     for (unsigned I = 0; I != E->getNumArgs(); ++I)
16340       Args.push_back(EmitScalarExpr(E->getArg(I)));
16341     assert(Args.size() == 5 || Args.size() == 6);
16342     if (Args.size() == 5)
16343       Args.insert(Args.begin(), llvm::UndefValue::get(Args[0]->getType()));
16344     Function *F =
16345         CGM.getIntrinsic(Intrinsic::amdgcn_update_dpp, Args[0]->getType());
16346     return Builder.CreateCall(F, Args);
16347   }
16348   case AMDGPU::BI__builtin_amdgcn_div_fixup:
16349   case AMDGPU::BI__builtin_amdgcn_div_fixupf:
16350   case AMDGPU::BI__builtin_amdgcn_div_fixuph:
16351     return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
16352   case AMDGPU::BI__builtin_amdgcn_trig_preop:
16353   case AMDGPU::BI__builtin_amdgcn_trig_preopf:
16354     return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
16355   case AMDGPU::BI__builtin_amdgcn_rcp:
16356   case AMDGPU::BI__builtin_amdgcn_rcpf:
16357   case AMDGPU::BI__builtin_amdgcn_rcph:
16358     return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
16359   case AMDGPU::BI__builtin_amdgcn_sqrt:
16360   case AMDGPU::BI__builtin_amdgcn_sqrtf:
16361   case AMDGPU::BI__builtin_amdgcn_sqrth:
16362     return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sqrt);
16363   case AMDGPU::BI__builtin_amdgcn_rsq:
16364   case AMDGPU::BI__builtin_amdgcn_rsqf:
16365   case AMDGPU::BI__builtin_amdgcn_rsqh:
16366     return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
16367   case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
16368   case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
16369     return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
16370   case AMDGPU::BI__builtin_amdgcn_sinf:
16371   case AMDGPU::BI__builtin_amdgcn_sinh:
16372     return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
16373   case AMDGPU::BI__builtin_amdgcn_cosf:
16374   case AMDGPU::BI__builtin_amdgcn_cosh:
16375     return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
16376   case AMDGPU::BI__builtin_amdgcn_dispatch_ptr:
16377     return EmitAMDGPUDispatchPtr(*this, E);
16378   case AMDGPU::BI__builtin_amdgcn_log_clampf:
16379     return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
16380   case AMDGPU::BI__builtin_amdgcn_ldexp:
16381   case AMDGPU::BI__builtin_amdgcn_ldexpf:
16382   case AMDGPU::BI__builtin_amdgcn_ldexph:
16383     return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
16384   case AMDGPU::BI__builtin_amdgcn_frexp_mant:
16385   case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
16386   case AMDGPU::BI__builtin_amdgcn_frexp_manth:
16387     return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
16388   case AMDGPU::BI__builtin_amdgcn_frexp_exp:
16389   case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
16390     Value *Src0 = EmitScalarExpr(E->getArg(0));
16391     Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
16392                                 { Builder.getInt32Ty(), Src0->getType() });
16393     return Builder.CreateCall(F, Src0);
16394   }
16395   case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
16396     Value *Src0 = EmitScalarExpr(E->getArg(0));
16397     Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
16398                                 { Builder.getInt16Ty(), Src0->getType() });
16399     return Builder.CreateCall(F, Src0);
16400   }
16401   case AMDGPU::BI__builtin_amdgcn_fract:
16402   case AMDGPU::BI__builtin_amdgcn_fractf:
16403   case AMDGPU::BI__builtin_amdgcn_fracth:
16404     return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
16405   case AMDGPU::BI__builtin_amdgcn_lerp:
16406     return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
16407   case AMDGPU::BI__builtin_amdgcn_ubfe:
16408     return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_ubfe);
16409   case AMDGPU::BI__builtin_amdgcn_sbfe:
16410     return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_sbfe);
16411   case AMDGPU::BI__builtin_amdgcn_uicmp:
16412   case AMDGPU::BI__builtin_amdgcn_uicmpl:
16413   case AMDGPU::BI__builtin_amdgcn_sicmp:
16414   case AMDGPU::BI__builtin_amdgcn_sicmpl: {
16415     llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16416     llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16417     llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16418 
16419     // FIXME-GFX10: How should 32 bit mask be handled?
16420     Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_icmp,
16421       { Builder.getInt64Ty(), Src0->getType() });
16422     return Builder.CreateCall(F, { Src0, Src1, Src2 });
16423   }
16424   case AMDGPU::BI__builtin_amdgcn_fcmp:
16425   case AMDGPU::BI__builtin_amdgcn_fcmpf: {
16426     llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16427     llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16428     llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16429 
16430     // FIXME-GFX10: How should 32 bit mask be handled?
16431     Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_fcmp,
16432       { Builder.getInt64Ty(), Src0->getType() });
16433     return Builder.CreateCall(F, { Src0, Src1, Src2 });
16434   }
16435   case AMDGPU::BI__builtin_amdgcn_class:
16436   case AMDGPU::BI__builtin_amdgcn_classf:
16437   case AMDGPU::BI__builtin_amdgcn_classh:
16438     return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
16439   case AMDGPU::BI__builtin_amdgcn_fmed3f:
16440   case AMDGPU::BI__builtin_amdgcn_fmed3h:
16441     return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
16442   case AMDGPU::BI__builtin_amdgcn_ds_append:
16443   case AMDGPU::BI__builtin_amdgcn_ds_consume: {
16444     Intrinsic::ID Intrin = BuiltinID == AMDGPU::BI__builtin_amdgcn_ds_append ?
16445       Intrinsic::amdgcn_ds_append : Intrinsic::amdgcn_ds_consume;
16446     Value *Src0 = EmitScalarExpr(E->getArg(0));
16447     Function *F = CGM.getIntrinsic(Intrin, { Src0->getType() });
16448     return Builder.CreateCall(F, { Src0, Builder.getFalse() });
16449   }
16450   case AMDGPU::BI__builtin_amdgcn_ds_faddf:
16451   case AMDGPU::BI__builtin_amdgcn_ds_fminf:
16452   case AMDGPU::BI__builtin_amdgcn_ds_fmaxf: {
16453     Intrinsic::ID Intrin;
16454     switch (BuiltinID) {
16455     case AMDGPU::BI__builtin_amdgcn_ds_faddf:
16456       Intrin = Intrinsic::amdgcn_ds_fadd;
16457       break;
16458     case AMDGPU::BI__builtin_amdgcn_ds_fminf:
16459       Intrin = Intrinsic::amdgcn_ds_fmin;
16460       break;
16461     case AMDGPU::BI__builtin_amdgcn_ds_fmaxf:
16462       Intrin = Intrinsic::amdgcn_ds_fmax;
16463       break;
16464     }
16465     llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16466     llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16467     llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16468     llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
16469     llvm::Value *Src4 = EmitScalarExpr(E->getArg(4));
16470     llvm::Function *F = CGM.getIntrinsic(Intrin, { Src1->getType() });
16471     llvm::FunctionType *FTy = F->getFunctionType();
16472     llvm::Type *PTy = FTy->getParamType(0);
16473     Src0 = Builder.CreatePointerBitCastOrAddrSpaceCast(Src0, PTy);
16474     return Builder.CreateCall(F, { Src0, Src1, Src2, Src3, Src4 });
16475   }
16476   case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
16477   case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
16478   case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
16479   case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
16480   case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
16481   case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
16482   case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
16483   case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: {
16484     Intrinsic::ID IID;
16485     llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
16486     switch (BuiltinID) {
16487     case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f32:
16488       ArgTy = llvm::Type::getFloatTy(getLLVMContext());
16489       IID = Intrinsic::amdgcn_global_atomic_fadd;
16490       break;
16491     case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2f16:
16492       ArgTy = llvm::FixedVectorType::get(
16493           llvm::Type::getHalfTy(getLLVMContext()), 2);
16494       IID = Intrinsic::amdgcn_global_atomic_fadd;
16495       break;
16496     case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_f64:
16497       IID = Intrinsic::amdgcn_global_atomic_fadd;
16498       break;
16499     case AMDGPU::BI__builtin_amdgcn_global_atomic_fmin_f64:
16500       IID = Intrinsic::amdgcn_global_atomic_fmin;
16501       break;
16502     case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
16503       IID = Intrinsic::amdgcn_global_atomic_fmax;
16504       break;
16505     case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
16506       IID = Intrinsic::amdgcn_flat_atomic_fadd;
16507       break;
16508     case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
16509       IID = Intrinsic::amdgcn_flat_atomic_fmin;
16510       break;
16511     case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
16512       IID = Intrinsic::amdgcn_flat_atomic_fmax;
16513       break;
16514     }
16515     llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
16516     llvm::Value *Val = EmitScalarExpr(E->getArg(1));
16517     llvm::Function *F =
16518         CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
16519     return Builder.CreateCall(F, {Addr, Val});
16520   }
16521   case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
16522   case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: {
16523     Intrinsic::ID IID;
16524     llvm::Type *ArgTy;
16525     switch (BuiltinID) {
16526     case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32:
16527       ArgTy = llvm::Type::getFloatTy(getLLVMContext());
16528       IID = Intrinsic::amdgcn_ds_fadd;
16529       break;
16530     case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
16531       ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
16532       IID = Intrinsic::amdgcn_ds_fadd;
16533       break;
16534     }
16535     llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
16536     llvm::Value *Val = EmitScalarExpr(E->getArg(1));
16537     llvm::Constant *ZeroI32 = llvm::ConstantInt::getIntegerValue(
16538         llvm::Type::getInt32Ty(getLLVMContext()), APInt(32, 0, true));
16539     llvm::Constant *ZeroI1 = llvm::ConstantInt::getIntegerValue(
16540         llvm::Type::getInt1Ty(getLLVMContext()), APInt(1, 0));
16541     llvm::Function *F = CGM.getIntrinsic(IID, {ArgTy});
16542     return Builder.CreateCall(F, {Addr, Val, ZeroI32, ZeroI32, ZeroI1});
16543   }
16544   case AMDGPU::BI__builtin_amdgcn_read_exec: {
16545     CallInst *CI = cast<CallInst>(
16546       EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, NormalRead, "exec"));
16547     CI->setConvergent();
16548     return CI;
16549   }
16550   case AMDGPU::BI__builtin_amdgcn_read_exec_lo:
16551   case AMDGPU::BI__builtin_amdgcn_read_exec_hi: {
16552     StringRef RegName = BuiltinID == AMDGPU::BI__builtin_amdgcn_read_exec_lo ?
16553       "exec_lo" : "exec_hi";
16554     CallInst *CI = cast<CallInst>(
16555       EmitSpecialRegisterBuiltin(*this, E, Int32Ty, Int32Ty, NormalRead, RegName));
16556     CI->setConvergent();
16557     return CI;
16558   }
16559   case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray:
16560   case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h:
16561   case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l:
16562   case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: {
16563     llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0));
16564     llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1));
16565     llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2));
16566     llvm::Value *RayDir = EmitScalarExpr(E->getArg(3));
16567     llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
16568     llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));
16569 
16570     Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
16571                                    {NodePtr->getType(), RayDir->getType()});
16572     return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
16573                                   RayInverseDir, TextureDescr});
16574   }
16575 
16576   // amdgcn workitem
16577   case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
16578     return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
16579   case AMDGPU::BI__builtin_amdgcn_workitem_id_y:
16580     return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_y, 0, 1024);
16581   case AMDGPU::BI__builtin_amdgcn_workitem_id_z:
16582     return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_z, 0, 1024);
16583 
16584   // amdgcn workgroup size
16585   case AMDGPU::BI__builtin_amdgcn_workgroup_size_x:
16586     return EmitAMDGPUWorkGroupSize(*this, 0);
16587   case AMDGPU::BI__builtin_amdgcn_workgroup_size_y:
16588     return EmitAMDGPUWorkGroupSize(*this, 1);
16589   case AMDGPU::BI__builtin_amdgcn_workgroup_size_z:
16590     return EmitAMDGPUWorkGroupSize(*this, 2);
16591 
16592   // amdgcn grid size
16593   case AMDGPU::BI__builtin_amdgcn_grid_size_x:
16594     return EmitAMDGPUGridSize(*this, 0);
16595   case AMDGPU::BI__builtin_amdgcn_grid_size_y:
16596     return EmitAMDGPUGridSize(*this, 1);
16597   case AMDGPU::BI__builtin_amdgcn_grid_size_z:
16598     return EmitAMDGPUGridSize(*this, 2);
16599 
16600   // r600 intrinsics
16601   case AMDGPU::BI__builtin_r600_recipsqrt_ieee:
16602   case AMDGPU::BI__builtin_r600_recipsqrt_ieeef:
16603     return emitUnaryBuiltin(*this, E, Intrinsic::r600_recipsqrt_ieee);
16604   case AMDGPU::BI__builtin_r600_read_tidig_x:
16605     return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_x, 0, 1024);
16606   case AMDGPU::BI__builtin_r600_read_tidig_y:
16607     return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_y, 0, 1024);
16608   case AMDGPU::BI__builtin_r600_read_tidig_z:
16609     return emitRangedBuiltin(*this, Intrinsic::r600_read_tidig_z, 0, 1024);
16610   case AMDGPU::BI__builtin_amdgcn_alignbit: {
16611     llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
16612     llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
16613     llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
16614     Function *F = CGM.getIntrinsic(Intrinsic::fshr, Src0->getType());
16615     return Builder.CreateCall(F, { Src0, Src1, Src2 });
16616   }
16617 
16618   case AMDGPU::BI__builtin_amdgcn_fence: {
16619     if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(0)),
16620                                 EmitScalarExpr(E->getArg(1)), AO, SSID))
16621       return Builder.CreateFence(AO, SSID);
16622     LLVM_FALLTHROUGH;
16623   }
16624   case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
16625   case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
16626   case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
16627   case AMDGPU::BI__builtin_amdgcn_atomic_dec64: {
16628     unsigned BuiltinAtomicOp;
16629     llvm::Type *ResultType = ConvertType(E->getType());
16630 
16631     switch (BuiltinID) {
16632     case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
16633     case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
16634       BuiltinAtomicOp = Intrinsic::amdgcn_atomic_inc;
16635       break;
16636     case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
16637     case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
16638       BuiltinAtomicOp = Intrinsic::amdgcn_atomic_dec;
16639       break;
16640     }
16641 
16642     Value *Ptr = EmitScalarExpr(E->getArg(0));
16643     Value *Val = EmitScalarExpr(E->getArg(1));
16644 
16645     llvm::Function *F =
16646         CGM.getIntrinsic(BuiltinAtomicOp, {ResultType, Ptr->getType()});
16647 
16648     if (ProcessOrderScopeAMDGCN(EmitScalarExpr(E->getArg(2)),
16649                                 EmitScalarExpr(E->getArg(3)), AO, SSID)) {
16650 
      // llvm.amdgcn.atomic.inc and llvm.amdgcn.atomic.dec expect ordering and
      // scope as unsigned values.
16653       Value *MemOrder = Builder.getInt32(static_cast<int>(AO));
16654       Value *MemScope = Builder.getInt32(static_cast<int>(SSID));
16655 
16656       QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
16657       bool Volatile =
16658           PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
16659       Value *IsVolatile = Builder.getInt1(static_cast<bool>(Volatile));
16660 
16661       return Builder.CreateCall(F, {Ptr, Val, MemOrder, MemScope, IsVolatile});
16662     }
16663     LLVM_FALLTHROUGH;
16664   }
16665   default:
16666     return nullptr;
16667   }
16668 }
16669 
16670 /// Handle a SystemZ function in which the final argument is a pointer
16671 /// to an int that receives the post-instruction CC value.  At the LLVM level
16672 /// this is represented as a function that returns a {result, cc} pair.
16673 static Value *EmitSystemZIntrinsicWithCC(CodeGenFunction &CGF,
16674                                          unsigned IntrinsicID,
16675                                          const CallExpr *E) {
16676   unsigned NumArgs = E->getNumArgs() - 1;
16677   SmallVector<Value *, 8> Args(NumArgs);
16678   for (unsigned I = 0; I < NumArgs; ++I)
16679     Args[I] = CGF.EmitScalarExpr(E->getArg(I));
16680   Address CCPtr = CGF.EmitPointerWithAlignment(E->getArg(NumArgs));
16681   Function *F = CGF.CGM.getIntrinsic(IntrinsicID);
16682   Value *Call = CGF.Builder.CreateCall(F, Args);
16683   Value *CC = CGF.Builder.CreateExtractValue(Call, 1);
16684   CGF.Builder.CreateStore(CC, CCPtr);
16685   return CGF.Builder.CreateExtractValue(Call, 0);
16686 }
16687 
16688 Value *CodeGenFunction::EmitSystemZBuiltinExpr(unsigned BuiltinID,
16689                                                const CallExpr *E) {
16690   switch (BuiltinID) {
16691   case SystemZ::BI__builtin_tbegin: {
16692     Value *TDB = EmitScalarExpr(E->getArg(0));
16693     Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
16694     Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin);
16695     return Builder.CreateCall(F, {TDB, Control});
16696   }
16697   case SystemZ::BI__builtin_tbegin_nofloat: {
16698     Value *TDB = EmitScalarExpr(E->getArg(0));
16699     Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff0c);
16700     Function *F = CGM.getIntrinsic(Intrinsic::s390_tbegin_nofloat);
16701     return Builder.CreateCall(F, {TDB, Control});
16702   }
16703   case SystemZ::BI__builtin_tbeginc: {
16704     Value *TDB = llvm::ConstantPointerNull::get(Int8PtrTy);
16705     Value *Control = llvm::ConstantInt::get(Int32Ty, 0xff08);
16706     Function *F = CGM.getIntrinsic(Intrinsic::s390_tbeginc);
16707     return Builder.CreateCall(F, {TDB, Control});
16708   }
16709   case SystemZ::BI__builtin_tabort: {
16710     Value *Data = EmitScalarExpr(E->getArg(0));
16711     Function *F = CGM.getIntrinsic(Intrinsic::s390_tabort);
16712     return Builder.CreateCall(F, Builder.CreateSExt(Data, Int64Ty, "tabort"));
16713   }
16714   case SystemZ::BI__builtin_non_tx_store: {
16715     Value *Address = EmitScalarExpr(E->getArg(0));
16716     Value *Data = EmitScalarExpr(E->getArg(1));
16717     Function *F = CGM.getIntrinsic(Intrinsic::s390_ntstg);
16718     return Builder.CreateCall(F, {Data, Address});
16719   }
16720 
16721   // Vector builtins.  Note that most vector builtins are mapped automatically
16722   // to target-specific LLVM intrinsics.  The ones handled specially here can
16723   // be represented via standard LLVM IR, which is preferable to enable common
16724   // LLVM optimizations.
16725 
16726   case SystemZ::BI__builtin_s390_vpopctb:
16727   case SystemZ::BI__builtin_s390_vpopcth:
16728   case SystemZ::BI__builtin_s390_vpopctf:
16729   case SystemZ::BI__builtin_s390_vpopctg: {
16730     llvm::Type *ResultType = ConvertType(E->getType());
16731     Value *X = EmitScalarExpr(E->getArg(0));
16732     Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
16733     return Builder.CreateCall(F, X);
16734   }
16735 
16736   case SystemZ::BI__builtin_s390_vclzb:
16737   case SystemZ::BI__builtin_s390_vclzh:
16738   case SystemZ::BI__builtin_s390_vclzf:
16739   case SystemZ::BI__builtin_s390_vclzg: {
16740     llvm::Type *ResultType = ConvertType(E->getType());
16741     Value *X = EmitScalarExpr(E->getArg(0));
16742     Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
16743     Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
16744     return Builder.CreateCall(F, {X, Undef});
16745   }
16746 
16747   case SystemZ::BI__builtin_s390_vctzb:
16748   case SystemZ::BI__builtin_s390_vctzh:
16749   case SystemZ::BI__builtin_s390_vctzf:
16750   case SystemZ::BI__builtin_s390_vctzg: {
16751     llvm::Type *ResultType = ConvertType(E->getType());
16752     Value *X = EmitScalarExpr(E->getArg(0));
16753     Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
16754     Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
16755     return Builder.CreateCall(F, {X, Undef});
16756   }
16757 
16758   case SystemZ::BI__builtin_s390_vfsqsb:
16759   case SystemZ::BI__builtin_s390_vfsqdb: {
16760     llvm::Type *ResultType = ConvertType(E->getType());
16761     Value *X = EmitScalarExpr(E->getArg(0));
16762     if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
                                     ResultType);
      return Builder.CreateConstrainedFPCall(F, {X});
16765     } else {
16766       Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
16767       return Builder.CreateCall(F, X);
16768     }
16769   }
16770   case SystemZ::BI__builtin_s390_vfmasb:
16771   case SystemZ::BI__builtin_s390_vfmadb: {
16772     llvm::Type *ResultType = ConvertType(E->getType());
16773     Value *X = EmitScalarExpr(E->getArg(0));
16774     Value *Y = EmitScalarExpr(E->getArg(1));
16775     Value *Z = EmitScalarExpr(E->getArg(2));
16776     if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma,
                                     ResultType);
16778       return Builder.CreateConstrainedFPCall(F, {X, Y, Z});
16779     } else {
16780       Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16781       return Builder.CreateCall(F, {X, Y, Z});
16782     }
16783   }
16784   case SystemZ::BI__builtin_s390_vfmssb:
16785   case SystemZ::BI__builtin_s390_vfmsdb: {
16786     llvm::Type *ResultType = ConvertType(E->getType());
16787     Value *X = EmitScalarExpr(E->getArg(0));
16788     Value *Y = EmitScalarExpr(E->getArg(1));
16789     Value *Z = EmitScalarExpr(E->getArg(2));
16790     if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma,
                                     ResultType);
      return Builder.CreateConstrainedFPCall(
          F, {X, Y, Builder.CreateFNeg(Z, "neg")});
16793     } else {
16794       Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16795       return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
16796     }
16797   }
16798   case SystemZ::BI__builtin_s390_vfnmasb:
16799   case SystemZ::BI__builtin_s390_vfnmadb: {
16800     llvm::Type *ResultType = ConvertType(E->getType());
16801     Value *X = EmitScalarExpr(E->getArg(0));
16802     Value *Y = EmitScalarExpr(E->getArg(1));
16803     Value *Z = EmitScalarExpr(E->getArg(2));
16804     if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma,
                                     ResultType);
      return Builder.CreateFNeg(
          Builder.CreateConstrainedFPCall(F, {X, Y, Z}), "neg");
16807     } else {
16808       Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16809       return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, Z}), "neg");
16810     }
16811   }
16812   case SystemZ::BI__builtin_s390_vfnmssb:
16813   case SystemZ::BI__builtin_s390_vfnmsdb: {
16814     llvm::Type *ResultType = ConvertType(E->getType());
16815     Value *X = EmitScalarExpr(E->getArg(0));
16816     Value *Y = EmitScalarExpr(E->getArg(1));
16817     Value *Z = EmitScalarExpr(E->getArg(2));
16818     if (Builder.getIsFPConstrained()) {
      Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_fma,
                                     ResultType);
      Value *NegZ = Builder.CreateFNeg(Z, "neg");
      return Builder.CreateFNeg(
          Builder.CreateConstrainedFPCall(F, {X, Y, NegZ}), "neg");
16822     } else {
16823       Function *F = CGM.getIntrinsic(Intrinsic::fma, ResultType);
16824       Value *NegZ = Builder.CreateFNeg(Z, "neg");
      return Builder.CreateFNeg(Builder.CreateCall(F, {X, Y, NegZ}), "neg");
16826     }
16827   }
16828   case SystemZ::BI__builtin_s390_vflpsb:
16829   case SystemZ::BI__builtin_s390_vflpdb: {
16830     llvm::Type *ResultType = ConvertType(E->getType());
16831     Value *X = EmitScalarExpr(E->getArg(0));
16832     Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
16833     return Builder.CreateCall(F, X);
16834   }
16835   case SystemZ::BI__builtin_s390_vflnsb:
16836   case SystemZ::BI__builtin_s390_vflndb: {
16837     llvm::Type *ResultType = ConvertType(E->getType());
16838     Value *X = EmitScalarExpr(E->getArg(0));
16839     Function *F = CGM.getIntrinsic(Intrinsic::fabs, ResultType);
16840     return Builder.CreateFNeg(Builder.CreateCall(F, X), "neg");
16841   }
16842   case SystemZ::BI__builtin_s390_vfisb:
16843   case SystemZ::BI__builtin_s390_vfidb: {
16844     llvm::Type *ResultType = ConvertType(E->getType());
16845     Value *X = EmitScalarExpr(E->getArg(0));
16846     // Constant-fold the M4 and M5 mask arguments.
16847     llvm::APSInt M4 = *E->getArg(1)->getIntegerConstantExpr(getContext());
16848     llvm::APSInt M5 = *E->getArg(2)->getIntegerConstantExpr(getContext());
    // Check whether this instance can be represented via an LLVM standard
16850     // intrinsic.  We only support some combinations of M4 and M5.
16851     Intrinsic::ID ID = Intrinsic::not_intrinsic;
16852     Intrinsic::ID CI;
16853     switch (M4.getZExtValue()) {
16854     default: break;
16855     case 0:  // IEEE-inexact exception allowed
16856       switch (M5.getZExtValue()) {
16857       default: break;
16858       case 0: ID = Intrinsic::rint;
16859               CI = Intrinsic::experimental_constrained_rint; break;
16860       }
16861       break;
16862     case 4:  // IEEE-inexact exception suppressed
16863       switch (M5.getZExtValue()) {
16864       default: break;
16865       case 0: ID = Intrinsic::nearbyint;
16866               CI = Intrinsic::experimental_constrained_nearbyint; break;
16867       case 1: ID = Intrinsic::round;
16868               CI = Intrinsic::experimental_constrained_round; break;
16869       case 5: ID = Intrinsic::trunc;
16870               CI = Intrinsic::experimental_constrained_trunc; break;
16871       case 6: ID = Intrinsic::ceil;
16872               CI = Intrinsic::experimental_constrained_ceil; break;
16873       case 7: ID = Intrinsic::floor;
16874               CI = Intrinsic::experimental_constrained_floor; break;
16875       }
16876       break;
16877     }
16878     if (ID != Intrinsic::not_intrinsic) {
16879       if (Builder.getIsFPConstrained()) {
16880         Function *F = CGM.getIntrinsic(CI, ResultType);
16881         return Builder.CreateConstrainedFPCall(F, X);
16882       } else {
16883         Function *F = CGM.getIntrinsic(ID, ResultType);
16884         return Builder.CreateCall(F, X);
16885       }
16886     }
16887     switch (BuiltinID) { // FIXME: constrained version?
16888       case SystemZ::BI__builtin_s390_vfisb: ID = Intrinsic::s390_vfisb; break;
16889       case SystemZ::BI__builtin_s390_vfidb: ID = Intrinsic::s390_vfidb; break;
16890       default: llvm_unreachable("Unknown BuiltinID");
16891     }
16892     Function *F = CGM.getIntrinsic(ID);
16893     Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
16894     Value *M5Value = llvm::ConstantInt::get(getLLVMContext(), M5);
16895     return Builder.CreateCall(F, {X, M4Value, M5Value});
16896   }
16897   case SystemZ::BI__builtin_s390_vfmaxsb:
16898   case SystemZ::BI__builtin_s390_vfmaxdb: {
16899     llvm::Type *ResultType = ConvertType(E->getType());
16900     Value *X = EmitScalarExpr(E->getArg(0));
16901     Value *Y = EmitScalarExpr(E->getArg(1));
16902     // Constant-fold the M4 mask argument.
16903     llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
    // Check whether this instance can be represented via an LLVM standard
16905     // intrinsic.  We only support some values of M4.
16906     Intrinsic::ID ID = Intrinsic::not_intrinsic;
16907     Intrinsic::ID CI;
16908     switch (M4.getZExtValue()) {
16909     default: break;
16910     case 4: ID = Intrinsic::maxnum;
16911             CI = Intrinsic::experimental_constrained_maxnum; break;
16912     }
16913     if (ID != Intrinsic::not_intrinsic) {
16914       if (Builder.getIsFPConstrained()) {
16915         Function *F = CGM.getIntrinsic(CI, ResultType);
16916         return Builder.CreateConstrainedFPCall(F, {X, Y});
16917       } else {
16918         Function *F = CGM.getIntrinsic(ID, ResultType);
16919         return Builder.CreateCall(F, {X, Y});
16920       }
16921     }
16922     switch (BuiltinID) {
16923       case SystemZ::BI__builtin_s390_vfmaxsb: ID = Intrinsic::s390_vfmaxsb; break;
16924       case SystemZ::BI__builtin_s390_vfmaxdb: ID = Intrinsic::s390_vfmaxdb; break;
16925       default: llvm_unreachable("Unknown BuiltinID");
16926     }
16927     Function *F = CGM.getIntrinsic(ID);
16928     Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
16929     return Builder.CreateCall(F, {X, Y, M4Value});
16930   }
16931   case SystemZ::BI__builtin_s390_vfminsb:
16932   case SystemZ::BI__builtin_s390_vfmindb: {
16933     llvm::Type *ResultType = ConvertType(E->getType());
16934     Value *X = EmitScalarExpr(E->getArg(0));
16935     Value *Y = EmitScalarExpr(E->getArg(1));
16936     // Constant-fold the M4 mask argument.
16937     llvm::APSInt M4 = *E->getArg(2)->getIntegerConstantExpr(getContext());
    // Check whether this instance can be represented via an LLVM standard
16939     // intrinsic.  We only support some values of M4.
16940     Intrinsic::ID ID = Intrinsic::not_intrinsic;
16941     Intrinsic::ID CI;
16942     switch (M4.getZExtValue()) {
16943     default: break;
16944     case 4: ID = Intrinsic::minnum;
16945             CI = Intrinsic::experimental_constrained_minnum; break;
16946     }
16947     if (ID != Intrinsic::not_intrinsic) {
16948       if (Builder.getIsFPConstrained()) {
16949         Function *F = CGM.getIntrinsic(CI, ResultType);
16950         return Builder.CreateConstrainedFPCall(F, {X, Y});
16951       } else {
16952         Function *F = CGM.getIntrinsic(ID, ResultType);
16953         return Builder.CreateCall(F, {X, Y});
16954       }
16955     }
16956     switch (BuiltinID) {
16957       case SystemZ::BI__builtin_s390_vfminsb: ID = Intrinsic::s390_vfminsb; break;
16958       case SystemZ::BI__builtin_s390_vfmindb: ID = Intrinsic::s390_vfmindb; break;
16959       default: llvm_unreachable("Unknown BuiltinID");
16960     }
16961     Function *F = CGM.getIntrinsic(ID);
16962     Value *M4Value = llvm::ConstantInt::get(getLLVMContext(), M4);
16963     return Builder.CreateCall(F, {X, Y, M4Value});
16964   }
16965 
16966   case SystemZ::BI__builtin_s390_vlbrh:
16967   case SystemZ::BI__builtin_s390_vlbrf:
16968   case SystemZ::BI__builtin_s390_vlbrg: {
16969     llvm::Type *ResultType = ConvertType(E->getType());
16970     Value *X = EmitScalarExpr(E->getArg(0));
16971     Function *F = CGM.getIntrinsic(Intrinsic::bswap, ResultType);
16972     return Builder.CreateCall(F, X);
16973   }
16974 
16975   // Vector intrinsics that output the post-instruction CC value.
16976 
16977 #define INTRINSIC_WITH_CC(NAME) \
16978     case SystemZ::BI__builtin_##NAME: \
16979       return EmitSystemZIntrinsicWithCC(*this, Intrinsic::NAME, E)
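  // For example, INTRINSIC_WITH_CC(s390_vpkshs) expands to:
  //   case SystemZ::BI__builtin_s390_vpkshs:
  //     return EmitSystemZIntrinsicWithCC(*this, Intrinsic::s390_vpkshs, E);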
16980 
16981   INTRINSIC_WITH_CC(s390_vpkshs);
16982   INTRINSIC_WITH_CC(s390_vpksfs);
16983   INTRINSIC_WITH_CC(s390_vpksgs);
16984 
16985   INTRINSIC_WITH_CC(s390_vpklshs);
16986   INTRINSIC_WITH_CC(s390_vpklsfs);
16987   INTRINSIC_WITH_CC(s390_vpklsgs);
16988 
16989   INTRINSIC_WITH_CC(s390_vceqbs);
16990   INTRINSIC_WITH_CC(s390_vceqhs);
16991   INTRINSIC_WITH_CC(s390_vceqfs);
16992   INTRINSIC_WITH_CC(s390_vceqgs);
16993 
16994   INTRINSIC_WITH_CC(s390_vchbs);
16995   INTRINSIC_WITH_CC(s390_vchhs);
16996   INTRINSIC_WITH_CC(s390_vchfs);
16997   INTRINSIC_WITH_CC(s390_vchgs);
16998 
16999   INTRINSIC_WITH_CC(s390_vchlbs);
17000   INTRINSIC_WITH_CC(s390_vchlhs);
17001   INTRINSIC_WITH_CC(s390_vchlfs);
17002   INTRINSIC_WITH_CC(s390_vchlgs);
17003 
17004   INTRINSIC_WITH_CC(s390_vfaebs);
17005   INTRINSIC_WITH_CC(s390_vfaehs);
17006   INTRINSIC_WITH_CC(s390_vfaefs);
17007 
17008   INTRINSIC_WITH_CC(s390_vfaezbs);
17009   INTRINSIC_WITH_CC(s390_vfaezhs);
17010   INTRINSIC_WITH_CC(s390_vfaezfs);
17011 
17012   INTRINSIC_WITH_CC(s390_vfeebs);
17013   INTRINSIC_WITH_CC(s390_vfeehs);
17014   INTRINSIC_WITH_CC(s390_vfeefs);
17015 
17016   INTRINSIC_WITH_CC(s390_vfeezbs);
17017   INTRINSIC_WITH_CC(s390_vfeezhs);
17018   INTRINSIC_WITH_CC(s390_vfeezfs);
17019 
17020   INTRINSIC_WITH_CC(s390_vfenebs);
17021   INTRINSIC_WITH_CC(s390_vfenehs);
17022   INTRINSIC_WITH_CC(s390_vfenefs);
17023 
17024   INTRINSIC_WITH_CC(s390_vfenezbs);
17025   INTRINSIC_WITH_CC(s390_vfenezhs);
17026   INTRINSIC_WITH_CC(s390_vfenezfs);
17027 
17028   INTRINSIC_WITH_CC(s390_vistrbs);
17029   INTRINSIC_WITH_CC(s390_vistrhs);
17030   INTRINSIC_WITH_CC(s390_vistrfs);
17031 
17032   INTRINSIC_WITH_CC(s390_vstrcbs);
17033   INTRINSIC_WITH_CC(s390_vstrchs);
17034   INTRINSIC_WITH_CC(s390_vstrcfs);
17035 
17036   INTRINSIC_WITH_CC(s390_vstrczbs);
17037   INTRINSIC_WITH_CC(s390_vstrczhs);
17038   INTRINSIC_WITH_CC(s390_vstrczfs);
17039 
17040   INTRINSIC_WITH_CC(s390_vfcesbs);
17041   INTRINSIC_WITH_CC(s390_vfcedbs);
17042   INTRINSIC_WITH_CC(s390_vfchsbs);
17043   INTRINSIC_WITH_CC(s390_vfchdbs);
17044   INTRINSIC_WITH_CC(s390_vfchesbs);
17045   INTRINSIC_WITH_CC(s390_vfchedbs);
17046 
17047   INTRINSIC_WITH_CC(s390_vftcisb);
17048   INTRINSIC_WITH_CC(s390_vftcidb);
17049 
17050   INTRINSIC_WITH_CC(s390_vstrsb);
17051   INTRINSIC_WITH_CC(s390_vstrsh);
17052   INTRINSIC_WITH_CC(s390_vstrsf);
17053 
17054   INTRINSIC_WITH_CC(s390_vstrszb);
17055   INTRINSIC_WITH_CC(s390_vstrszh);
17056   INTRINSIC_WITH_CC(s390_vstrszf);
17057 
17058 #undef INTRINSIC_WITH_CC
17059 
17060   default:
17061     return nullptr;
17062   }
17063 }
17064 
17065 namespace {
// Helper classes for mapping MMA builtins to the matching LLVM intrinsic
// variant.
17067 struct NVPTXMmaLdstInfo {
17068   unsigned NumResults;  // Number of elements to load/store
17069   // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
17070   unsigned IID_col;
17071   unsigned IID_row;
17072 };
17073 
17074 #define MMA_INTR(geom_op_type, layout) \
17075   Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
17076 #define MMA_LDST(n, geom_op_type)                                              \
17077   { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
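// For example, MMA_LDST(8, m16n16k16_load_a_f16) expands to:
//   { 8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//        Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride }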
17078 
17079 static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
17080   switch (BuiltinID) {
17081   // FP MMA loads
17082   case NVPTX::BI__hmma_m16n16k16_ld_a:
17083     return MMA_LDST(8, m16n16k16_load_a_f16);
17084   case NVPTX::BI__hmma_m16n16k16_ld_b:
17085     return MMA_LDST(8, m16n16k16_load_b_f16);
17086   case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
17087     return MMA_LDST(4, m16n16k16_load_c_f16);
17088   case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
17089     return MMA_LDST(8, m16n16k16_load_c_f32);
17090   case NVPTX::BI__hmma_m32n8k16_ld_a:
17091     return MMA_LDST(8, m32n8k16_load_a_f16);
17092   case NVPTX::BI__hmma_m32n8k16_ld_b:
17093     return MMA_LDST(8, m32n8k16_load_b_f16);
17094   case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
17095     return MMA_LDST(4, m32n8k16_load_c_f16);
17096   case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
17097     return MMA_LDST(8, m32n8k16_load_c_f32);
17098   case NVPTX::BI__hmma_m8n32k16_ld_a:
17099     return MMA_LDST(8, m8n32k16_load_a_f16);
17100   case NVPTX::BI__hmma_m8n32k16_ld_b:
17101     return MMA_LDST(8, m8n32k16_load_b_f16);
17102   case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
17103     return MMA_LDST(4, m8n32k16_load_c_f16);
17104   case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
17105     return MMA_LDST(8, m8n32k16_load_c_f32);
17106 
17107   // Integer MMA loads
17108   case NVPTX::BI__imma_m16n16k16_ld_a_s8:
17109     return MMA_LDST(2, m16n16k16_load_a_s8);
17110   case NVPTX::BI__imma_m16n16k16_ld_a_u8:
17111     return MMA_LDST(2, m16n16k16_load_a_u8);
17112   case NVPTX::BI__imma_m16n16k16_ld_b_s8:
17113     return MMA_LDST(2, m16n16k16_load_b_s8);
17114   case NVPTX::BI__imma_m16n16k16_ld_b_u8:
17115     return MMA_LDST(2, m16n16k16_load_b_u8);
17116   case NVPTX::BI__imma_m16n16k16_ld_c:
17117     return MMA_LDST(8, m16n16k16_load_c_s32);
17118   case NVPTX::BI__imma_m32n8k16_ld_a_s8:
17119     return MMA_LDST(4, m32n8k16_load_a_s8);
17120   case NVPTX::BI__imma_m32n8k16_ld_a_u8:
17121     return MMA_LDST(4, m32n8k16_load_a_u8);
17122   case NVPTX::BI__imma_m32n8k16_ld_b_s8:
17123     return MMA_LDST(1, m32n8k16_load_b_s8);
17124   case NVPTX::BI__imma_m32n8k16_ld_b_u8:
17125     return MMA_LDST(1, m32n8k16_load_b_u8);
17126   case NVPTX::BI__imma_m32n8k16_ld_c:
17127     return MMA_LDST(8, m32n8k16_load_c_s32);
17128   case NVPTX::BI__imma_m8n32k16_ld_a_s8:
17129     return MMA_LDST(1, m8n32k16_load_a_s8);
17130   case NVPTX::BI__imma_m8n32k16_ld_a_u8:
17131     return MMA_LDST(1, m8n32k16_load_a_u8);
17132   case NVPTX::BI__imma_m8n32k16_ld_b_s8:
17133     return MMA_LDST(4, m8n32k16_load_b_s8);
17134   case NVPTX::BI__imma_m8n32k16_ld_b_u8:
17135     return MMA_LDST(4, m8n32k16_load_b_u8);
17136   case NVPTX::BI__imma_m8n32k16_ld_c:
17137     return MMA_LDST(8, m8n32k16_load_c_s32);
17138 
17139   // Sub-integer MMA loads.
17140   // Only row/col layout is supported by A/B fragments.
17141   case NVPTX::BI__imma_m8n8k32_ld_a_s4:
17142     return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
17143   case NVPTX::BI__imma_m8n8k32_ld_a_u4:
17144     return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
17145   case NVPTX::BI__imma_m8n8k32_ld_b_s4:
17146     return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
17147   case NVPTX::BI__imma_m8n8k32_ld_b_u4:
17148     return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
17149   case NVPTX::BI__imma_m8n8k32_ld_c:
17150     return MMA_LDST(2, m8n8k32_load_c_s32);
17151   case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
17152     return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
17153   case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
17154     return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
17155   case NVPTX::BI__bmma_m8n8k128_ld_c:
17156     return MMA_LDST(2, m8n8k128_load_c_s32);
17157 
17158   // Double MMA loads
17159   case NVPTX::BI__dmma_m8n8k4_ld_a:
17160     return MMA_LDST(1, m8n8k4_load_a_f64);
17161   case NVPTX::BI__dmma_m8n8k4_ld_b:
17162     return MMA_LDST(1, m8n8k4_load_b_f64);
17163   case NVPTX::BI__dmma_m8n8k4_ld_c:
17164     return MMA_LDST(2, m8n8k4_load_c_f64);
17165 
17166   // Alternate float MMA loads
17167   case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
17168     return MMA_LDST(4, m16n16k16_load_a_bf16);
17169   case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
17170     return MMA_LDST(4, m16n16k16_load_b_bf16);
17171   case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
17172     return MMA_LDST(2, m8n32k16_load_a_bf16);
17173   case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
17174     return MMA_LDST(8, m8n32k16_load_b_bf16);
17175   case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
17176     return MMA_LDST(8, m32n8k16_load_a_bf16);
17177   case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
17178     return MMA_LDST(2, m32n8k16_load_b_bf16);
17179   case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
17180     return MMA_LDST(4, m16n16k8_load_a_tf32);
17181   case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
17182     return MMA_LDST(2, m16n16k8_load_b_tf32);
17183   case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
17184     return MMA_LDST(8, m16n16k8_load_c_f32);
17185 
  // NOTE: We need to follow the inconsistent naming scheme used by NVCC.
  // Unlike PTX and LLVM IR, where stores always use fragment D, NVCC builtins
  // always use fragment C for both loads and stores.
17189   // FP MMA stores.
17190   case NVPTX::BI__hmma_m16n16k16_st_c_f16:
17191     return MMA_LDST(4, m16n16k16_store_d_f16);
17192   case NVPTX::BI__hmma_m16n16k16_st_c_f32:
17193     return MMA_LDST(8, m16n16k16_store_d_f32);
17194   case NVPTX::BI__hmma_m32n8k16_st_c_f16:
17195     return MMA_LDST(4, m32n8k16_store_d_f16);
17196   case NVPTX::BI__hmma_m32n8k16_st_c_f32:
17197     return MMA_LDST(8, m32n8k16_store_d_f32);
17198   case NVPTX::BI__hmma_m8n32k16_st_c_f16:
17199     return MMA_LDST(4, m8n32k16_store_d_f16);
17200   case NVPTX::BI__hmma_m8n32k16_st_c_f32:
17201     return MMA_LDST(8, m8n32k16_store_d_f32);
17202 
17203   // Integer and sub-integer MMA stores.
17204   // Another naming quirk. Unlike other MMA builtins that use PTX types in the
17205   // name, integer loads/stores use LLVM's i32.
17206   case NVPTX::BI__imma_m16n16k16_st_c_i32:
17207     return MMA_LDST(8, m16n16k16_store_d_s32);
17208   case NVPTX::BI__imma_m32n8k16_st_c_i32:
17209     return MMA_LDST(8, m32n8k16_store_d_s32);
17210   case NVPTX::BI__imma_m8n32k16_st_c_i32:
17211     return MMA_LDST(8, m8n32k16_store_d_s32);
17212   case NVPTX::BI__imma_m8n8k32_st_c_i32:
17213     return MMA_LDST(2, m8n8k32_store_d_s32);
17214   case NVPTX::BI__bmma_m8n8k128_st_c_i32:
17215     return MMA_LDST(2, m8n8k128_store_d_s32);
17216 
17217   // Double MMA store
17218   case NVPTX::BI__dmma_m8n8k4_st_c_f64:
17219     return MMA_LDST(2, m8n8k4_store_d_f64);
17220 
17221   // Alternate float MMA store
17222   case NVPTX::BI__mma_m16n16k8_st_c_f32:
17223     return MMA_LDST(8, m16n16k8_store_d_f32);
17224 
17225   default:
17226     llvm_unreachable("Unknown MMA builtin");
17227   }
17228 }
17229 #undef MMA_LDST
17230 #undef MMA_INTR
17231 
17233 struct NVPTXMmaInfo {
17234   unsigned NumEltsA;
17235   unsigned NumEltsB;
17236   unsigned NumEltsC;
17237   unsigned NumEltsD;
17238 
17239   // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
17240   // over 'col' for layout. The index of non-satf variants is expected to match
17241   // the undocumented layout constants used by CUDA's mma.hpp.
17242   std::array<unsigned, 8> Variants;
17243 
17244   unsigned getMMAIntrinsic(int Layout, bool Satf) {
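    // Variants holds the row_row, row_col, col_row, col_col intrinsics first,
    // then (when present) their _satfinite counterparts, so the flat index is
    // Layout + 4 * Satf.  For example, Layout == 1 (row/col) with Satf set
    // selects Variants[5].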
17245     unsigned Index = Layout + 4 * Satf;
17246     if (Index >= Variants.size())
17247       return 0;
17248     return Variants[Index];
17249   }
17250 };
17251 
// Returns the NVPTXMmaInfo for a given MMA builtin.  Calling getMMAIntrinsic
// on the result yields an intrinsic that matches Layout and Satf for valid
// combinations of Layout and Satf, and 0 otherwise.
17254 static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
17255   // clang-format off
17256 #define MMA_VARIANTS(geom, type)                                    \
17257       Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type,             \
17258       Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type,             \
17259       Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type,             \
17260       Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type
17261 #define MMA_SATF_VARIANTS(geom, type)                               \
17262       MMA_VARIANTS(geom, type),                                     \
17263       Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
17264       Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
17265       Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
17266       Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite
17267 // Sub-integer MMA only supports row.col layout.
17268 #define MMA_VARIANTS_I4(geom, type) \
17269       0, \
17270       Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type,             \
17271       0, \
17272       0, \
17273       0, \
17274       Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
17275       0, \
17276       0
17277 // b1 MMA does not support .satfinite.
17278 #define MMA_VARIANTS_B1_XOR(geom, type) \
17279       0, \
17280       Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type,             \
17281       0, \
17282       0, \
17283       0, \
17284       0, \
17285       0, \
17286       0
17287 #define MMA_VARIANTS_B1_AND(geom, type) \
17288       0, \
17289       Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type,             \
17290       0, \
17291       0, \
17292       0, \
17293       0, \
17294       0, \
17295       0
17296   // clang-format on
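  // For example, MMA_SATF_VARIANTS(m16n16k16, f16_f16) lists the eight
  // nvvm_wmma_m16n16k16_mma_{row,col}_{row,col}_f16_f16[_satfinite]
  // intrinsics in the index order expected by getMMAIntrinsic.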
17297   switch (BuiltinID) {
17298   // FP MMA
  // Note that the 'type' argument of MMA_SATF_VARIANTS uses D_C notation,
  // while the NumElts* fields of the return value are ordered as A, B, C, D.
17301   case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
17302     return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}};
17303   case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
17304     return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}};
17305   case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
17306     return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}};
17307   case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
17308     return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}};
17309   case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
17310     return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}};
17311   case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
17312     return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}};
17313   case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
17314     return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}};
17315   case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
17316     return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}};
17317   case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
17318     return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}};
17319   case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
17320     return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}};
17321   case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
17322     return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}};
17323   case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
17324     return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}};
17325 
17326   // Integer MMA
17327   case NVPTX::BI__imma_m16n16k16_mma_s8:
17328     return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}};
17329   case NVPTX::BI__imma_m16n16k16_mma_u8:
17330     return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}};
17331   case NVPTX::BI__imma_m32n8k16_mma_s8:
17332     return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}};
17333   case NVPTX::BI__imma_m32n8k16_mma_u8:
17334     return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}};
17335   case NVPTX::BI__imma_m8n32k16_mma_s8:
17336     return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}};
17337   case NVPTX::BI__imma_m8n32k16_mma_u8:
17338     return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}};
17339 
17340   // Sub-integer MMA
17341   case NVPTX::BI__imma_m8n8k32_mma_s4:
17342     return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}};
17343   case NVPTX::BI__imma_m8n8k32_mma_u4:
17344     return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}};
17345   case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
17346     return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}};
17347   case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
17348     return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}};
17349 
17350   // Double MMA
17351   case NVPTX::BI__dmma_m8n8k4_mma_f64:
17352     return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}};
17353 
17354   // Alternate FP MMA
17355   case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
17356     return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}};
17357   case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
17358     return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}};
17359   case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
17360     return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}};
17361   case NVPTX::BI__mma_tf32_m16n16k8_mma_f32:
17362     return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}};
17363   default:
17364     llvm_unreachable("Unexpected builtin ID.");
17365   }
17366 #undef MMA_VARIANTS
17367 #undef MMA_SATF_VARIANTS
17368 #undef MMA_VARIANTS_I4
17369 #undef MMA_VARIANTS_B1_AND
17370 #undef MMA_VARIANTS_B1_XOR
17371 }
17372 
17373 } // namespace
17374 
17375 Value *
17376 CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
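  // MakeLdg emits a read-only cached load through the llvm.nvvm.ldg.global.*
  // intrinsics, passing the natural alignment of the pointee type as the
  // second operand.  Illustrative lowering (the exact overload suffixes
  // depend on the builtin):
  //   __nvvm_ldg_f4(p) -> @llvm.nvvm.ldg.global.f.v4f32.p0v4f32(p, i32 16)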
17377   auto MakeLdg = [&](unsigned IntrinsicID) {
17378     Value *Ptr = EmitScalarExpr(E->getArg(0));
17379     clang::CharUnits Align =
17380         CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
17381     return Builder.CreateCall(
17382         CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
17383                                        Ptr->getType()}),
17384         {Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
17385   };
17386   auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
17387     Value *Ptr = EmitScalarExpr(E->getArg(0));
17388     return Builder.CreateCall(
17389         CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
17390                                        Ptr->getType()}),
17391         {Ptr, EmitScalarExpr(E->getArg(1))});
17392   };
17393   switch (BuiltinID) {
17394   case NVPTX::BI__nvvm_atom_add_gen_i:
17395   case NVPTX::BI__nvvm_atom_add_gen_l:
17396   case NVPTX::BI__nvvm_atom_add_gen_ll:
17397     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
17398 
17399   case NVPTX::BI__nvvm_atom_sub_gen_i:
17400   case NVPTX::BI__nvvm_atom_sub_gen_l:
17401   case NVPTX::BI__nvvm_atom_sub_gen_ll:
17402     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
17403 
17404   case NVPTX::BI__nvvm_atom_and_gen_i:
17405   case NVPTX::BI__nvvm_atom_and_gen_l:
17406   case NVPTX::BI__nvvm_atom_and_gen_ll:
17407     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
17408 
17409   case NVPTX::BI__nvvm_atom_or_gen_i:
17410   case NVPTX::BI__nvvm_atom_or_gen_l:
17411   case NVPTX::BI__nvvm_atom_or_gen_ll:
17412     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
17413 
17414   case NVPTX::BI__nvvm_atom_xor_gen_i:
17415   case NVPTX::BI__nvvm_atom_xor_gen_l:
17416   case NVPTX::BI__nvvm_atom_xor_gen_ll:
17417     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
17418 
17419   case NVPTX::BI__nvvm_atom_xchg_gen_i:
17420   case NVPTX::BI__nvvm_atom_xchg_gen_l:
17421   case NVPTX::BI__nvvm_atom_xchg_gen_ll:
17422     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
17423 
17424   case NVPTX::BI__nvvm_atom_max_gen_i:
17425   case NVPTX::BI__nvvm_atom_max_gen_l:
17426   case NVPTX::BI__nvvm_atom_max_gen_ll:
17427     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
17428 
17429   case NVPTX::BI__nvvm_atom_max_gen_ui:
17430   case NVPTX::BI__nvvm_atom_max_gen_ul:
17431   case NVPTX::BI__nvvm_atom_max_gen_ull:
17432     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
17433 
17434   case NVPTX::BI__nvvm_atom_min_gen_i:
17435   case NVPTX::BI__nvvm_atom_min_gen_l:
17436   case NVPTX::BI__nvvm_atom_min_gen_ll:
17437     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
17438 
17439   case NVPTX::BI__nvvm_atom_min_gen_ui:
17440   case NVPTX::BI__nvvm_atom_min_gen_ul:
17441   case NVPTX::BI__nvvm_atom_min_gen_ull:
17442     return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
17443 
17444   case NVPTX::BI__nvvm_atom_cas_gen_i:
17445   case NVPTX::BI__nvvm_atom_cas_gen_l:
17446   case NVPTX::BI__nvvm_atom_cas_gen_ll:
17447     // __nvvm_atom_cas_gen_* should return the old value rather than the
17448     // success flag.
17449     return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
17450 
17451   case NVPTX::BI__nvvm_atom_add_gen_f:
17452   case NVPTX::BI__nvvm_atom_add_gen_d: {
17453     Value *Ptr = EmitScalarExpr(E->getArg(0));
17454     Value *Val = EmitScalarExpr(E->getArg(1));
17455     return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, Ptr, Val,
17456                                    AtomicOrdering::SequentiallyConsistent);
17457   }
17458 
17459   case NVPTX::BI__nvvm_atom_inc_gen_ui: {
17460     Value *Ptr = EmitScalarExpr(E->getArg(0));
17461     Value *Val = EmitScalarExpr(E->getArg(1));
17462     Function *FnALI32 =
17463         CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_inc_32, Ptr->getType());
17464     return Builder.CreateCall(FnALI32, {Ptr, Val});
17465   }
17466 
17467   case NVPTX::BI__nvvm_atom_dec_gen_ui: {
17468     Value *Ptr = EmitScalarExpr(E->getArg(0));
17469     Value *Val = EmitScalarExpr(E->getArg(1));
17470     Function *FnALD32 =
17471         CGM.getIntrinsic(Intrinsic::nvvm_atomic_load_dec_32, Ptr->getType());
17472     return Builder.CreateCall(FnALD32, {Ptr, Val});
17473   }
17474 
17475   case NVPTX::BI__nvvm_ldg_c:
17476   case NVPTX::BI__nvvm_ldg_c2:
17477   case NVPTX::BI__nvvm_ldg_c4:
17478   case NVPTX::BI__nvvm_ldg_s:
17479   case NVPTX::BI__nvvm_ldg_s2:
17480   case NVPTX::BI__nvvm_ldg_s4:
17481   case NVPTX::BI__nvvm_ldg_i:
17482   case NVPTX::BI__nvvm_ldg_i2:
17483   case NVPTX::BI__nvvm_ldg_i4:
17484   case NVPTX::BI__nvvm_ldg_l:
17485   case NVPTX::BI__nvvm_ldg_ll:
17486   case NVPTX::BI__nvvm_ldg_ll2:
17487   case NVPTX::BI__nvvm_ldg_uc:
17488   case NVPTX::BI__nvvm_ldg_uc2:
17489   case NVPTX::BI__nvvm_ldg_uc4:
17490   case NVPTX::BI__nvvm_ldg_us:
17491   case NVPTX::BI__nvvm_ldg_us2:
17492   case NVPTX::BI__nvvm_ldg_us4:
17493   case NVPTX::BI__nvvm_ldg_ui:
17494   case NVPTX::BI__nvvm_ldg_ui2:
17495   case NVPTX::BI__nvvm_ldg_ui4:
17496   case NVPTX::BI__nvvm_ldg_ul:
17497   case NVPTX::BI__nvvm_ldg_ull:
17498   case NVPTX::BI__nvvm_ldg_ull2:
17499     // PTX Interoperability section 2.2: "For a vector with an even number of
17500     // elements, its alignment is set to number of elements times the alignment
17501     // of its member: n*alignof(t)."
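    // For example, __nvvm_ldg_f4 loads a 4 x float vector with alignment
    // 16 == 4 * alignof(float).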
17502     return MakeLdg(Intrinsic::nvvm_ldg_global_i);
17503   case NVPTX::BI__nvvm_ldg_f:
17504   case NVPTX::BI__nvvm_ldg_f2:
17505   case NVPTX::BI__nvvm_ldg_f4:
17506   case NVPTX::BI__nvvm_ldg_d:
17507   case NVPTX::BI__nvvm_ldg_d2:
17508     return MakeLdg(Intrinsic::nvvm_ldg_global_f);
17509 
17510   case NVPTX::BI__nvvm_atom_cta_add_gen_i:
17511   case NVPTX::BI__nvvm_atom_cta_add_gen_l:
17512   case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
17513     return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
17514   case NVPTX::BI__nvvm_atom_sys_add_gen_i:
17515   case NVPTX::BI__nvvm_atom_sys_add_gen_l:
17516   case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
17517     return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
17518   case NVPTX::BI__nvvm_atom_cta_add_gen_f:
17519   case NVPTX::BI__nvvm_atom_cta_add_gen_d:
17520     return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
17521   case NVPTX::BI__nvvm_atom_sys_add_gen_f:
17522   case NVPTX::BI__nvvm_atom_sys_add_gen_d:
17523     return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
17524   case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
17525   case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
17526   case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
17527     return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
17528   case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
17529   case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
17530   case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
17531     return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
17532   case NVPTX::BI__nvvm_atom_cta_max_gen_i:
17533   case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
17534   case NVPTX::BI__nvvm_atom_cta_max_gen_l:
17535   case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
17536   case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
17537   case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
17538     return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
17539   case NVPTX::BI__nvvm_atom_sys_max_gen_i:
17540   case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
17541   case NVPTX::BI__nvvm_atom_sys_max_gen_l:
17542   case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
17543   case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
17544   case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
17545     return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
17546   case NVPTX::BI__nvvm_atom_cta_min_gen_i:
17547   case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
17548   case NVPTX::BI__nvvm_atom_cta_min_gen_l:
17549   case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
17550   case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
17551   case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
17552     return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
17553   case NVPTX::BI__nvvm_atom_sys_min_gen_i:
17554   case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
17555   case NVPTX::BI__nvvm_atom_sys_min_gen_l:
17556   case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
17557   case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
17558   case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
17559     return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
17560   case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
17561     return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
17562   case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
17563     return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
17564   case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
17565     return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
17566   case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
17567     return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
17568   case NVPTX::BI__nvvm_atom_cta_and_gen_i:
17569   case NVPTX::BI__nvvm_atom_cta_and_gen_l:
17570   case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
17571     return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
17572   case NVPTX::BI__nvvm_atom_sys_and_gen_i:
17573   case NVPTX::BI__nvvm_atom_sys_and_gen_l:
17574   case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
17575     return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
17576   case NVPTX::BI__nvvm_atom_cta_or_gen_i:
17577   case NVPTX::BI__nvvm_atom_cta_or_gen_l:
17578   case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
17579     return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
17580   case NVPTX::BI__nvvm_atom_sys_or_gen_i:
17581   case NVPTX::BI__nvvm_atom_sys_or_gen_l:
17582   case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
17583     return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
17584   case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
17585   case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
17586   case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
17587     return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
17588   case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
17589   case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
17590   case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
17591     return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
17592   case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
17593   case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
17594   case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
17595     Value *Ptr = EmitScalarExpr(E->getArg(0));
17596     return Builder.CreateCall(
17597         CGM.getIntrinsic(
17598             Intrinsic::nvvm_atomic_cas_gen_i_cta,
17599             {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
17600         {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
17601   }
17602   case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
17603   case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
17604   case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
17605     Value *Ptr = EmitScalarExpr(E->getArg(0));
17606     return Builder.CreateCall(
17607         CGM.getIntrinsic(
17608             Intrinsic::nvvm_atomic_cas_gen_i_sys,
17609             {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
17610         {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
17611   }
17612   case NVPTX::BI__nvvm_match_all_sync_i32p:
17613   case NVPTX::BI__nvvm_match_all_sync_i64p: {
17614     Value *Mask = EmitScalarExpr(E->getArg(0));
17615     Value *Val = EmitScalarExpr(E->getArg(1));
17616     Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
17617     Value *ResultPair = Builder.CreateCall(
17618         CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
17619                              ? Intrinsic::nvvm_match_all_sync_i32p
17620                              : Intrinsic::nvvm_match_all_sync_i64p),
17621         {Mask, Val});
17622     Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
17623                                      PredOutPtr.getElementType());
17624     Builder.CreateStore(Pred, PredOutPtr);
17625     return Builder.CreateExtractValue(ResultPair, 0);
17626   }
17627 
17628   // FP MMA loads
17629   case NVPTX::BI__hmma_m16n16k16_ld_a:
17630   case NVPTX::BI__hmma_m16n16k16_ld_b:
17631   case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
17632   case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
17633   case NVPTX::BI__hmma_m32n8k16_ld_a:
17634   case NVPTX::BI__hmma_m32n8k16_ld_b:
17635   case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
17636   case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
17637   case NVPTX::BI__hmma_m8n32k16_ld_a:
17638   case NVPTX::BI__hmma_m8n32k16_ld_b:
17639   case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
17640   case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
17641   // Integer MMA loads.
17642   case NVPTX::BI__imma_m16n16k16_ld_a_s8:
17643   case NVPTX::BI__imma_m16n16k16_ld_a_u8:
17644   case NVPTX::BI__imma_m16n16k16_ld_b_s8:
17645   case NVPTX::BI__imma_m16n16k16_ld_b_u8:
17646   case NVPTX::BI__imma_m16n16k16_ld_c:
17647   case NVPTX::BI__imma_m32n8k16_ld_a_s8:
17648   case NVPTX::BI__imma_m32n8k16_ld_a_u8:
17649   case NVPTX::BI__imma_m32n8k16_ld_b_s8:
17650   case NVPTX::BI__imma_m32n8k16_ld_b_u8:
17651   case NVPTX::BI__imma_m32n8k16_ld_c:
17652   case NVPTX::BI__imma_m8n32k16_ld_a_s8:
17653   case NVPTX::BI__imma_m8n32k16_ld_a_u8:
17654   case NVPTX::BI__imma_m8n32k16_ld_b_s8:
17655   case NVPTX::BI__imma_m8n32k16_ld_b_u8:
17656   case NVPTX::BI__imma_m8n32k16_ld_c:
17657   // Sub-integer MMA loads.
17658   case NVPTX::BI__imma_m8n8k32_ld_a_s4:
17659   case NVPTX::BI__imma_m8n8k32_ld_a_u4:
17660   case NVPTX::BI__imma_m8n8k32_ld_b_s4:
17661   case NVPTX::BI__imma_m8n8k32_ld_b_u4:
17662   case NVPTX::BI__imma_m8n8k32_ld_c:
17663   case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
17664   case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
17665   case NVPTX::BI__bmma_m8n8k128_ld_c:
17666   // Double MMA loads.
17667   case NVPTX::BI__dmma_m8n8k4_ld_a:
17668   case NVPTX::BI__dmma_m8n8k4_ld_b:
17669   case NVPTX::BI__dmma_m8n8k4_ld_c:
17670   // Alternate float MMA loads.
17671   case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
17672   case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
17673   case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
17674   case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
17675   case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
17676   case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
17677   case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
17678   case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
17679   case NVPTX::BI__mma_tf32_m16n16k8_ld_c: {
17680     Address Dst = EmitPointerWithAlignment(E->getArg(0));
17681     Value *Src = EmitScalarExpr(E->getArg(1));
17682     Value *Ldm = EmitScalarExpr(E->getArg(2));
17683     Optional<llvm::APSInt> isColMajorArg =
17684         E->getArg(3)->getIntegerConstantExpr(getContext());
17685     if (!isColMajorArg)
17686       return nullptr;
17687     bool isColMajor = isColMajorArg->getSExtValue();
17688     NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
17689     unsigned IID = isColMajor ? II.IID_col : II.IID_row;
17690     if (IID == 0)
17691       return nullptr;
17692 
17693     Value *Result =
17694         Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
17695 
17696     // Save returned values.
17697     assert(II.NumResults);
17698     if (II.NumResults == 1) {
17699       Builder.CreateAlignedStore(Result, Dst.getPointer(),
17700                                  CharUnits::fromQuantity(4));
17701     } else {
17702       for (unsigned i = 0; i < II.NumResults; ++i) {
17703         Builder.CreateAlignedStore(
17704             Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
17705                                   Dst.getElementType()),
17706             Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
17707                               llvm::ConstantInt::get(IntTy, i)),
17708             CharUnits::fromQuantity(4));
17709       }
17710     }
17711     return Result;
17712   }
17713 
17714   case NVPTX::BI__hmma_m16n16k16_st_c_f16:
17715   case NVPTX::BI__hmma_m16n16k16_st_c_f32:
17716   case NVPTX::BI__hmma_m32n8k16_st_c_f16:
17717   case NVPTX::BI__hmma_m32n8k16_st_c_f32:
17718   case NVPTX::BI__hmma_m8n32k16_st_c_f16:
17719   case NVPTX::BI__hmma_m8n32k16_st_c_f32:
17720   case NVPTX::BI__imma_m16n16k16_st_c_i32:
17721   case NVPTX::BI__imma_m32n8k16_st_c_i32:
17722   case NVPTX::BI__imma_m8n32k16_st_c_i32:
17723   case NVPTX::BI__imma_m8n8k32_st_c_i32:
17724   case NVPTX::BI__bmma_m8n8k128_st_c_i32:
17725   case NVPTX::BI__dmma_m8n8k4_st_c_f64:
17726   case NVPTX::BI__mma_m16n16k8_st_c_f32: {
17727     Value *Dst = EmitScalarExpr(E->getArg(0));
17728     Address Src = EmitPointerWithAlignment(E->getArg(1));
17729     Value *Ldm = EmitScalarExpr(E->getArg(2));
17730     Optional<llvm::APSInt> isColMajorArg =
17731         E->getArg(3)->getIntegerConstantExpr(getContext());
17732     if (!isColMajorArg)
17733       return nullptr;
17734     bool isColMajor = isColMajorArg->getSExtValue();
17735     NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
17736     unsigned IID = isColMajor ? II.IID_col : II.IID_row;
17737     if (IID == 0)
17738       return nullptr;
17739     Function *Intrinsic =
17740         CGM.getIntrinsic(IID, Dst->getType());
17741     llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
17742     SmallVector<Value *, 10> Values = {Dst};
17743     for (unsigned i = 0; i < II.NumResults; ++i) {
17744       Value *V = Builder.CreateAlignedLoad(
17745           Src.getElementType(),
17746           Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
17747                             llvm::ConstantInt::get(IntTy, i)),
17748           CharUnits::fromQuantity(4));
17749       Values.push_back(Builder.CreateBitCast(V, ParamType));
17750     }
17751     Values.push_back(Ldm);
17752     Value *Result = Builder.CreateCall(Intrinsic, Values);
17753     return Result;
17754   }
17755 
17756   // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
17757   // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
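  // For example, __hmma_m16n16k16_mma_f32f16(d, a, b, c, /*layout=*/0,
  // /*satf=*/0) selects Intrinsic::nvvm_wmma_m16n16k16_mma_row_row_f32_f16.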
17758   case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
17759   case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
17760   case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
17761   case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
17762   case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
17763   case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
17764   case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
17765   case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
17766   case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
17767   case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
17768   case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
17769   case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
17770   case NVPTX::BI__imma_m16n16k16_mma_s8:
17771   case NVPTX::BI__imma_m16n16k16_mma_u8:
17772   case NVPTX::BI__imma_m32n8k16_mma_s8:
17773   case NVPTX::BI__imma_m32n8k16_mma_u8:
17774   case NVPTX::BI__imma_m8n32k16_mma_s8:
17775   case NVPTX::BI__imma_m8n32k16_mma_u8:
17776   case NVPTX::BI__imma_m8n8k32_mma_s4:
17777   case NVPTX::BI__imma_m8n8k32_mma_u4:
17778   case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
17779   case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
17780   case NVPTX::BI__dmma_m8n8k4_mma_f64:
17781   case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
17782   case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
17783   case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
17784   case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: {
17785     Address Dst = EmitPointerWithAlignment(E->getArg(0));
17786     Address SrcA = EmitPointerWithAlignment(E->getArg(1));
17787     Address SrcB = EmitPointerWithAlignment(E->getArg(2));
17788     Address SrcC = EmitPointerWithAlignment(E->getArg(3));
17789     Optional<llvm::APSInt> LayoutArg =
17790         E->getArg(4)->getIntegerConstantExpr(getContext());
17791     if (!LayoutArg)
17792       return nullptr;
17793     int Layout = LayoutArg->getSExtValue();
17794     if (Layout < 0 || Layout > 3)
17795       return nullptr;
17796     llvm::APSInt SatfArg;
17797     if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
17798         BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
17799       SatfArg = 0;  // .b1 does not have satf argument.
17800     else if (Optional<llvm::APSInt> OptSatfArg =
17801                  E->getArg(5)->getIntegerConstantExpr(getContext()))
17802       SatfArg = *OptSatfArg;
17803     else
17804       return nullptr;
17805     bool Satf = SatfArg.getSExtValue();
17806     NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
17807     unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
17808     if (IID == 0)  // Unsupported combination of Layout/Satf.
17809       return nullptr;
17810 
17811     SmallVector<Value *, 24> Values;
17812     Function *Intrinsic = CGM.getIntrinsic(IID);
17813     llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
17814     // Load A
17815     for (unsigned i = 0; i < MI.NumEltsA; ++i) {
17816       Value *V = Builder.CreateAlignedLoad(
17817           SrcA.getElementType(),
17818           Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
17819                             llvm::ConstantInt::get(IntTy, i)),
17820           CharUnits::fromQuantity(4));
17821       Values.push_back(Builder.CreateBitCast(V, AType));
17822     }
17823     // Load B
17824     llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
17825     for (unsigned i = 0; i < MI.NumEltsB; ++i) {
17826       Value *V = Builder.CreateAlignedLoad(
17827           SrcB.getElementType(),
17828           Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
17829                             llvm::ConstantInt::get(IntTy, i)),
17830           CharUnits::fromQuantity(4));
17831       Values.push_back(Builder.CreateBitCast(V, BType));
17832     }
17833     // Load C
17834     llvm::Type *CType =
17835         Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
17836     for (unsigned i = 0; i < MI.NumEltsC; ++i) {
17837       Value *V = Builder.CreateAlignedLoad(
17838           SrcC.getElementType(),
17839           Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
17840                             llvm::ConstantInt::get(IntTy, i)),
17841           CharUnits::fromQuantity(4));
17842       Values.push_back(Builder.CreateBitCast(V, CType));
17843     }
17844     Value *Result = Builder.CreateCall(Intrinsic, Values);
17845     llvm::Type *DType = Dst.getElementType();
17846     for (unsigned i = 0; i < MI.NumEltsD; ++i)
17847       Builder.CreateAlignedStore(
17848           Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
17849           Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
17850                             llvm::ConstantInt::get(IntTy, i)),
17851           CharUnits::fromQuantity(4));
17852     return Result;
17853   }
17854   default:
17855     return nullptr;
17856   }
17857 }
17858 
17859 namespace {
17860 struct BuiltinAlignArgs {
17861   llvm::Value *Src = nullptr;
17862   llvm::Type *SrcType = nullptr;
17863   llvm::Value *Alignment = nullptr;
17864   llvm::Value *Mask = nullptr;
17865   llvm::IntegerType *IntType = nullptr;
17866 
17867   BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
17868     QualType AstType = E->getArg(0)->getType();
17869     if (AstType->isArrayType())
17870       Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).getPointer();
17871     else
17872       Src = CGF.EmitScalarExpr(E->getArg(0));
17873     SrcType = Src->getType();
17874     if (SrcType->isPointerTy()) {
17875       IntType = IntegerType::get(
17876           CGF.getLLVMContext(),
17877           CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
17878     } else {
17879       assert(SrcType->isIntegerTy());
17880       IntType = cast<llvm::IntegerType>(SrcType);
17881     }
17882     Alignment = CGF.EmitScalarExpr(E->getArg(1));
17883     Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
17884     auto *One = llvm::ConstantInt::get(IntType, 1);
17885     Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
17886   }
17887 };
17888 } // namespace
17889 
17890 /// Generate (x & (y-1)) == 0.
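/// For example, __builtin_is_aligned(p, 16) converts p to an integer with
/// ptrtoint and tests (addr & 15) == 0.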
17891 RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
17892   BuiltinAlignArgs Args(E, *this);
17893   llvm::Value *SrcAddress = Args.Src;
17894   if (Args.SrcType->isPointerTy())
17895     SrcAddress =
17896         Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
17897   return RValue::get(Builder.CreateICmpEQ(
17898       Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
17899       llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
17900 }
17901 
17902 /// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
/// llvm.ptrmask intrinsic (with a GEP beforehand in the align_up case).
17905 /// TODO: actually use ptrmask once most optimization passes know about it.
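/// For example, __builtin_align_up(x, 16) computes ((x + 15) & ~15) and
/// __builtin_align_down(x, 16) computes (x & ~15); pointer results are
/// rebuilt from the original base with an (inbounds) GEP.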
17906 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
17907   BuiltinAlignArgs Args(E, *this);
17908   llvm::Value *SrcAddr = Args.Src;
17909   if (Args.Src->getType()->isPointerTy())
17910     SrcAddr = Builder.CreatePtrToInt(Args.Src, Args.IntType, "intptr");
17911   llvm::Value *SrcForMask = SrcAddr;
17912   if (AlignUp) {
    // When aligning up, first add the mask so that we cross into the next
    // alignment interval, then align down to the nearest valid multiple.
    // Adding the mask ensures that align_up on an already aligned value
    // leaves the value unchanged.
17917     SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
17918   }
17919   // Invert the mask to only clear the lower bits.
17920   llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
17921   llvm::Value *Result =
17922       Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
17923   if (Args.Src->getType()->isPointerTy()) {
17924     /// TODO: Use ptrmask instead of ptrtoint+gep once it is optimized well.
    // Result = Builder.CreateIntrinsic(
    //  Intrinsic::ptrmask, {Args.SrcType, SrcForMask->getType(), Args.IntType},
    //  {SrcForMask, InvertedMask}, nullptr, "aligned_result");
17928     Result->setName("aligned_intptr");
17929     llvm::Value *Difference = Builder.CreateSub(Result, SrcAddr, "diff");
17930     // The result must point to the same underlying allocation. This means we
17931     // can use an inbounds GEP to enable better optimization.
17932     Value *Base = EmitCastToVoidPtr(Args.Src);
17933     if (getLangOpts().isSignedOverflowDefined())
17934       Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result");
17935     else
17936       Result = EmitCheckedInBoundsGEP(Base, Difference,
17937                                       /*SignedIndices=*/true,
17938                                       /*isSubtraction=*/!AlignUp,
17939                                       E->getExprLoc(), "aligned_result");
17940     Result = Builder.CreatePointerCast(Result, Args.SrcType);
17941     // Emit an alignment assumption to ensure that the new alignment is
17942     // propagated to loads/stores, etc.
17943     emitAlignmentAssumption(Result, E, E->getExprLoc(), Args.Alignment);
17944   }
17945   assert(Result->getType() == Args.SrcType);
17946   return RValue::get(Result);
17947 }
17948 
17949 Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
17950                                                    const CallExpr *E) {
17951   switch (BuiltinID) {
17952   case WebAssembly::BI__builtin_wasm_memory_size: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *I = EmitScalarExpr(E->getArg(0));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_memory_size, ResultType);
    return Builder.CreateCall(Callee, I);
  }
  case WebAssembly::BI__builtin_wasm_memory_grow: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Args[] = {EmitScalarExpr(E->getArg(0)),
                     EmitScalarExpr(E->getArg(1))};
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_memory_grow, ResultType);
    return Builder.CreateCall(Callee, Args);
  }
  case WebAssembly::BI__builtin_wasm_tls_size: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_size, ResultType);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_tls_align: {
    llvm::Type *ResultType = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_align, ResultType);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_tls_base: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_tls_base);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_throw: {
    Value *Tag = EmitScalarExpr(E->getArg(0));
    Value *Obj = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_throw);
    return Builder.CreateCall(Callee, {Tag, Obj});
  }
  case WebAssembly::BI__builtin_wasm_rethrow: {
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_rethrow);
    return Builder.CreateCall(Callee);
  }
  case WebAssembly::BI__builtin_wasm_memory_atomic_wait32: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Expected = EmitScalarExpr(E->getArg(1));
    Value *Timeout = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait32);
    return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
  }
  case WebAssembly::BI__builtin_wasm_memory_atomic_wait64: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Expected = EmitScalarExpr(E->getArg(1));
    Value *Timeout = EmitScalarExpr(E->getArg(2));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_wait64);
    return Builder.CreateCall(Callee, {Addr, Expected, Timeout});
  }
  case WebAssembly::BI__builtin_wasm_memory_atomic_notify: {
    Value *Addr = EmitScalarExpr(E->getArg(0));
    Value *Count = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_memory_atomic_notify);
    return Builder.CreateCall(Callee, {Addr, Count});
  }
  case WebAssembly::BI__builtin_wasm_trunc_s_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_s_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_s_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_s_i64_f64: {
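    // These lower to the trapping wasm truncation intrinsics; the saturating
    // variants further below use the generic fptosi.sat/fptoui.sat
    // intrinsics instead.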
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_trunc_signed, {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_u_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_u_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_u_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_u_i64_f64: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_unsigned,
                                        {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f32:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i64_f64:
  case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    llvm::Type *ResT = ConvertType(E->getType());
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()});
    return Builder.CreateCall(Callee, {Src});
  }
  case WebAssembly::BI__builtin_wasm_min_f32:
  case WebAssembly::BI__builtin_wasm_min_f64:
  case WebAssembly::BI__builtin_wasm_min_f32x4:
  case WebAssembly::BI__builtin_wasm_min_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::minimum, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_max_f32:
  case WebAssembly::BI__builtin_wasm_max_f64:
  case WebAssembly::BI__builtin_wasm_max_f32x4:
  case WebAssembly::BI__builtin_wasm_max_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_pmin_f32x4:
  case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_pmax_f32x4:
  case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_ceil_f32x4:
  case WebAssembly::BI__builtin_wasm_floor_f32x4:
  case WebAssembly::BI__builtin_wasm_trunc_f32x4:
  case WebAssembly::BI__builtin_wasm_nearest_f32x4:
  case WebAssembly::BI__builtin_wasm_ceil_f64x2:
  case WebAssembly::BI__builtin_wasm_floor_f64x2:
  case WebAssembly::BI__builtin_wasm_trunc_f64x2:
  case WebAssembly::BI__builtin_wasm_nearest_f64x2: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_ceil_f32x4:
    case WebAssembly::BI__builtin_wasm_ceil_f64x2:
      IntNo = Intrinsic::ceil;
      break;
    case WebAssembly::BI__builtin_wasm_floor_f32x4:
    case WebAssembly::BI__builtin_wasm_floor_f64x2:
      IntNo = Intrinsic::floor;
      break;
    case WebAssembly::BI__builtin_wasm_trunc_f32x4:
    case WebAssembly::BI__builtin_wasm_trunc_f64x2:
      IntNo = Intrinsic::trunc;
      break;
    case WebAssembly::BI__builtin_wasm_nearest_f32x4:
    case WebAssembly::BI__builtin_wasm_nearest_f64x2:
      IntNo = Intrinsic::nearbyint;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *Value = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, Value);
  }
  case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    Value *Indices = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
    return Builder.CreateCall(Callee, {Src, Indices});
  }
  case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
  case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
  case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
  case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
  case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
  case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
    case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
      IntNo = Intrinsic::sadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
    case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
      IntNo = Intrinsic::uadd_sat;
      break;
    case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
      IntNo = Intrinsic::wasm_sub_sat_signed;
      break;
    case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
    case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8:
      IntNo = Intrinsic::wasm_sub_sat_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_abs_i8x16:
  case WebAssembly::BI__builtin_wasm_abs_i16x8:
  case WebAssembly::BI__builtin_wasm_abs_i32x4:
  case WebAssembly::BI__builtin_wasm_abs_i64x2: {
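    // Integer abs is open-coded as select(x < 0, -x, x) rather than emitted
    // via an intrinsic; the backend can pattern-match this back to the wasm
    // abs instructions.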
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Value *Neg = Builder.CreateNeg(Vec, "neg");
    Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
    Value *ICmp = Builder.CreateICmpSLT(Vec, Zero, "abscond");
    return Builder.CreateSelect(ICmp, Neg, Vec, "abs");
  }
  case WebAssembly::BI__builtin_wasm_min_s_i8x16:
  case WebAssembly::BI__builtin_wasm_min_u_i8x16:
  case WebAssembly::BI__builtin_wasm_max_s_i8x16:
  case WebAssembly::BI__builtin_wasm_max_u_i8x16:
  case WebAssembly::BI__builtin_wasm_min_s_i16x8:
  case WebAssembly::BI__builtin_wasm_min_u_i16x8:
  case WebAssembly::BI__builtin_wasm_max_s_i16x8:
  case WebAssembly::BI__builtin_wasm_max_u_i16x8:
  case WebAssembly::BI__builtin_wasm_min_s_i32x4:
  case WebAssembly::BI__builtin_wasm_min_u_i32x4:
  case WebAssembly::BI__builtin_wasm_max_s_i32x4:
  case WebAssembly::BI__builtin_wasm_max_u_i32x4: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Value *ICmp;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_min_s_i8x16:
    case WebAssembly::BI__builtin_wasm_min_s_i16x8:
    case WebAssembly::BI__builtin_wasm_min_s_i32x4:
      ICmp = Builder.CreateICmpSLT(LHS, RHS);
      break;
    case WebAssembly::BI__builtin_wasm_min_u_i8x16:
    case WebAssembly::BI__builtin_wasm_min_u_i16x8:
    case WebAssembly::BI__builtin_wasm_min_u_i32x4:
      ICmp = Builder.CreateICmpULT(LHS, RHS);
      break;
    case WebAssembly::BI__builtin_wasm_max_s_i8x16:
    case WebAssembly::BI__builtin_wasm_max_s_i16x8:
    case WebAssembly::BI__builtin_wasm_max_s_i32x4:
      ICmp = Builder.CreateICmpSGT(LHS, RHS);
      break;
    case WebAssembly::BI__builtin_wasm_max_u_i8x16:
    case WebAssembly::BI__builtin_wasm_max_u_i16x8:
    case WebAssembly::BI__builtin_wasm_max_u_i32x4:
      ICmp = Builder.CreateICmpUGT(LHS, RHS);
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    return Builder.CreateSelect(ICmp, LHS, RHS);
  }
  case WebAssembly::BI__builtin_wasm_avgr_u_i8x16:
  case WebAssembly::BI__builtin_wasm_avgr_u_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_avgr_unsigned,
                                        ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
  case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_s_i32x4:
      IntNo = Intrinsic::wasm_extadd_pairwise_signed;
      break;
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_u_i16x8:
    case WebAssembly::BI__builtin_wasm_extadd_pairwise_i16x8_u_i32x4:
      IntNo = Intrinsic::wasm_extadd_pairwise_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }

    Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, Vec);
  }
  case WebAssembly::BI__builtin_wasm_bitselect: {
    Value *V1 = EmitScalarExpr(E->getArg(0));
    Value *V2 = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {V1, V2, C});
  }
  case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot);
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType()));
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_any_true_v128:
  case WebAssembly::BI__builtin_wasm_all_true_i8x16:
  case WebAssembly::BI__builtin_wasm_all_true_i16x8:
  case WebAssembly::BI__builtin_wasm_all_true_i32x4:
  case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_any_true_v128:
      IntNo = Intrinsic::wasm_anytrue;
      break;
    case WebAssembly::BI__builtin_wasm_all_true_i8x16:
    case WebAssembly::BI__builtin_wasm_all_true_i16x8:
    case WebAssembly::BI__builtin_wasm_all_true_i32x4:
    case WebAssembly::BI__builtin_wasm_all_true_i64x2:
      IntNo = Intrinsic::wasm_alltrue;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(IntNo, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_bitmask_i8x16:
  case WebAssembly::BI__builtin_wasm_bitmask_i16x8:
  case WebAssembly::BI__builtin_wasm_bitmask_i32x4:
  case WebAssembly::BI__builtin_wasm_bitmask_i64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_bitmask, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_abs_f32x4:
  case WebAssembly::BI__builtin_wasm_abs_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::fabs, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_sqrt_f32x4:
  case WebAssembly::BI__builtin_wasm_sqrt_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
    return Builder.CreateCall(Callee, {Vec});
  }
  case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
  case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
  case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4: {
    Value *Low = EmitScalarExpr(E->getArg(0));
    Value *High = EmitScalarExpr(E->getArg(1));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_signed;
      break;
    case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
    case WebAssembly::BI__builtin_wasm_narrow_u_i16x8_i32x4:
      IntNo = Intrinsic::wasm_narrow_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee =
        CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
    return Builder.CreateCall(Callee, {Low, High});
  }
  case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
  case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
      IntNo = Intrinsic::fptosi_sat;
      break;
    case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4:
      IntNo = Intrinsic::fptoui_sat;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    llvm::Type *SrcT = Vec->getType();
    llvm::Type *TruncT =
        SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32));
    Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
    Value *Trunc = Builder.CreateCall(Callee, Vec);
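    // The wasm instruction produces an i32x4 whose upper two lanes are zero;
    // model that by truncating to <2 x i32> and shuffling in two zero lanes.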
    Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0));
    Value *ConcatMask =
        llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1),
                                   Builder.getInt32(2), Builder.getInt32(3)});
    return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask);
  }
  case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
    Value *Ops[18];
    size_t OpIdx = 0;
    Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
    Ops[OpIdx++] = EmitScalarExpr(E->getArg(1));
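    // The remaining 16 operands are lane indices, which must be integer
    // constant expressions; the assert below relies on that.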
    while (OpIdx < 18) {
      Optional<llvm::APSInt> LaneConst =
          E->getArg(OpIdx)->getIntegerConstantExpr(getContext());
      assert(LaneConst && "Constant arg isn't actually constant?");
      Ops[OpIdx++] = llvm::ConstantInt::get(getLLVMContext(), *LaneConst);
    }
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
    return Builder.CreateCall(Callee, Ops);
  }
  case WebAssembly::BI__builtin_wasm_fma_f32x4:
  case WebAssembly::BI__builtin_wasm_fms_f32x4:
  case WebAssembly::BI__builtin_wasm_fma_f64x2:
  case WebAssembly::BI__builtin_wasm_fms_f64x2: {
    Value *A = EmitScalarExpr(E->getArg(0));
    Value *B = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_fma_f32x4:
    case WebAssembly::BI__builtin_wasm_fma_f64x2:
      IntNo = Intrinsic::wasm_fma;
      break;
    case WebAssembly::BI__builtin_wasm_fms_f32x4:
    case WebAssembly::BI__builtin_wasm_fms_f64x2:
      IntNo = Intrinsic::wasm_fms;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
    return Builder.CreateCall(Callee, {A, B, C});
  }
  case WebAssembly::BI__builtin_wasm_laneselect_i8x16:
  case WebAssembly::BI__builtin_wasm_laneselect_i16x8:
  case WebAssembly::BI__builtin_wasm_laneselect_i32x4:
  case WebAssembly::BI__builtin_wasm_laneselect_i64x2: {
    Value *A = EmitScalarExpr(E->getArg(0));
    Value *B = EmitScalarExpr(E->getArg(1));
    Value *C = EmitScalarExpr(E->getArg(2));
    Function *Callee =
        CGM.getIntrinsic(Intrinsic::wasm_laneselect, A->getType());
    return Builder.CreateCall(Callee, {A, B, C});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_swizzle_i8x16: {
    Value *Src = EmitScalarExpr(E->getArg(0));
    Value *Indices = EmitScalarExpr(E->getArg(1));
    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_swizzle);
    return Builder.CreateCall(Callee, {Src, Indices});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
  case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2: {
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_relaxed_min_f32x4:
    case WebAssembly::BI__builtin_wasm_relaxed_min_f64x2:
      IntNo = Intrinsic::wasm_relaxed_min;
      break;
    case WebAssembly::BI__builtin_wasm_relaxed_max_f32x4:
    case WebAssembly::BI__builtin_wasm_relaxed_max_f64x2:
      IntNo = Intrinsic::wasm_relaxed_max;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo, LHS->getType());
    return Builder.CreateCall(Callee, {LHS, RHS});
  }
  case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
  case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_s_i32x4_f64x2:
  case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_u_i32x4_f64x2: {
    Value *Vec = EmitScalarExpr(E->getArg(0));
    unsigned IntNo;
    switch (BuiltinID) {
    case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
      IntNo = Intrinsic::wasm_relaxed_trunc_signed;
      break;
    case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
      IntNo = Intrinsic::wasm_relaxed_trunc_unsigned;
      break;
    case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_s_i32x4_f64x2:
      IntNo = Intrinsic::wasm_relaxed_trunc_zero_signed;
      break;
    case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_u_i32x4_f64x2:
      IntNo = Intrinsic::wasm_relaxed_trunc_zero_unsigned;
      break;
    default:
      llvm_unreachable("unexpected builtin ID");
    }
    Function *Callee = CGM.getIntrinsic(IntNo);
    return Builder.CreateCall(Callee, {Vec});
  }
  default:
    return nullptr;
  }
}

static std::pair<Intrinsic::ID, unsigned>
getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
  struct Info {
    unsigned BuiltinID;
    Intrinsic::ID IntrinsicID;
    unsigned VecLen;
  };
  Info Infos[] = {
#define CUSTOM_BUILTIN_MAPPING(x,s) \
  { Hexagon::BI__builtin_HEXAGON_##x, Intrinsic::hexagon_##x, s },
    CUSTOM_BUILTIN_MAPPING(L2_loadrub_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrb_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadruh_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrh_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadri_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrd_pci, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrub_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrb_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadruh_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrh_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadri_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(L2_loadrd_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerb_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerh_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerf_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storeri_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerd_pci, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerb_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerh_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerf_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storeri_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(S2_storerd_pcr, 0)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq, 64)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstoreq_128B, 128)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorenq_128B, 128)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentq_128B, 128)
    CUSTOM_BUILTIN_MAPPING(V6_vmaskedstorentnq_128B, 128)
#include "clang/Basic/BuiltinsHexagonMapCustomDep.def"
#undef CUSTOM_BUILTIN_MAPPING
  };

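  // Sort the mapping table on first use so the binary search below is valid;
  // the static initializer ensures the sort runs at most once.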
  auto CmpInfo = [] (Info A, Info B) { return A.BuiltinID < B.BuiltinID; };
  static const bool SortOnce = (llvm::sort(Infos, CmpInfo), true);
  (void)SortOnce;

  const Info *F = std::lower_bound(std::begin(Infos), std::end(Infos),
                                   Info{BuiltinID, 0, 0}, CmpInfo);
  if (F == std::end(Infos) || F->BuiltinID != BuiltinID)
    return {Intrinsic::not_intrinsic, 0};

  return {F->IntrinsicID, F->VecLen};
}

Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
                                               const CallExpr *E) {
  Intrinsic::ID ID;
  unsigned VecLen;
  std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);

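  // A sketch of the circular-load lowering below, with illustrative names
  // (the actual builtin signatures come from the Hexagon headers):
  //   int V = __builtin_HEXAGON_L2_loadri_pci(&Ptr, Inc, Mod, Start);
  // MakeCircOp loads the base pointer from &Ptr, calls the intrinsic, writes
  // the post-incremented base back through &Ptr, and yields the loaded value.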
  auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
    // The base pointer is passed by address, so it needs to be loaded.
    Address A = EmitPointerWithAlignment(E->getArg(0));
    Address BP = Address(
        Builder.CreateBitCast(A.getPointer(), Int8PtrPtrTy), A.getAlignment());
    llvm::Value *Base = Builder.CreateLoad(BP);
    // The treatment of both loads and stores is the same: the arguments for
    // the builtin are the same as the arguments for the intrinsic.
    // Load:
    //   builtin(Base, Inc, Mod, Start) -> intr(Base, Inc, Mod, Start)
    //   builtin(Base, Mod, Start)      -> intr(Base, Mod, Start)
    // Store:
    //   builtin(Base, Inc, Mod, Val, Start) -> intr(Base, Inc, Mod, Val, Start)
    //   builtin(Base, Mod, Val, Start)      -> intr(Base, Mod, Val, Start)
    SmallVector<llvm::Value*,5> Ops = { Base };
    for (unsigned i = 1, e = E->getNumArgs(); i != e; ++i)
      Ops.push_back(EmitScalarExpr(E->getArg(i)));

    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(IntID), Ops);
    // The load intrinsics generate two results (Value, NewBase), stores
    // generate one (NewBase). The new base address needs to be stored.
    llvm::Value *NewBase = IsLoad ? Builder.CreateExtractValue(Result, 1)
                                  : Result;
    llvm::Value *LV = Builder.CreateBitCast(
        EmitScalarExpr(E->getArg(0)), NewBase->getType()->getPointerTo());
    Address Dest = EmitPointerWithAlignment(E->getArg(0));
    llvm::Value *RetVal =
        Builder.CreateAlignedStore(NewBase, LV, Dest.getAlignment());
    if (IsLoad)
      RetVal = Builder.CreateExtractValue(Result, 0);
    return RetVal;
  };

  // Handle the conversion of bit-reverse load intrinsics to bitcode. The
  // intrinsic call emitted here only reads from memory; the write to memory
  // is handled by the separate store instruction emitted below.
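  // A sketch of the intended use, with illustrative names (the actual
  // builtin signatures come from the Hexagon headers):
  //   unsigned char V;
  //   Ptr = __builtin_brev_ldub(Ptr, &V, Mod);
  // The loaded value is written to V and the updated base pointer returned.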
  auto MakeBrevLd = [this, E](unsigned IntID, llvm::Type *DestTy) {
    // In LLVM IR the intrinsic returns a pair {LoadedValue, NewBase}. The
    // new base pointer becomes the builtin's return value, while the loaded
    // value is handed back through the builtin's destination pointer
    // argument and therefore needs to be stored.
    llvm::Value *BaseAddress =
        Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);

    // Expressions like &(*pt++) are incremented on every evaluation;
    // EmitPointerWithAlignment and EmitScalarExpr each evaluate their
    // expression once per call, so each argument is evaluated only once here.
    Address DestAddr = EmitPointerWithAlignment(E->getArg(1));
    DestAddr = Address(Builder.CreateBitCast(DestAddr.getPointer(), Int8PtrTy),
                       DestAddr.getAlignment());
    llvm::Value *DestAddress = DestAddr.getPointer();

    // Operands are Base, Dest, Modifier.
    // The intrinsic format in LLVM IR is defined as
    // { ValueType, i8* } (i8*, i32).
    llvm::Value *Result = Builder.CreateCall(
        CGM.getIntrinsic(IntID), {BaseAddress, EmitScalarExpr(E->getArg(2))});

    // The value needs to be stored as the variable is passed by reference.
    llvm::Value *DestVal = Builder.CreateExtractValue(Result, 0);

    // The store needs to be truncated to fit the destination type.
    // While i32 and i64 are natively supported on Hexagon, i8 and i16 need
    // to be handled with stores of the respective destination type.
    DestVal = Builder.CreateTrunc(DestVal, DestTy);

    llvm::Value *DestForStore =
        Builder.CreateBitCast(DestAddress, DestVal->getType()->getPointerTo());
    Builder.CreateAlignedStore(DestVal, DestForStore, DestAddr.getAlignment());
    // The updated value of the base pointer is returned.
    return Builder.CreateExtractValue(Result, 1);
  };

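  // HVX predicate registers are represented as boolean vectors in LLVM IR,
  // while the C builtins traffic in ordinary vector registers. V2Q converts
  // a vector to a predicate via vandvrt and Q2V converts a predicate back to
  // a vector via vandqrt, each using an all-ones scalar mask.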
  auto V2Q = [this, VecLen] (llvm::Value *Vec) {
    Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandvrt_128B
                                     : Intrinsic::hexagon_V6_vandvrt;
    return Builder.CreateCall(CGM.getIntrinsic(ID),
                              {Vec, Builder.getInt32(-1)});
  };
  auto Q2V = [this, VecLen] (llvm::Value *Pred) {
    Intrinsic::ID ID = VecLen == 128 ? Intrinsic::hexagon_V6_vandqrt_128B
                                     : Intrinsic::hexagon_V6_vandqrt;
    return Builder.CreateCall(CGM.getIntrinsic(ID),
                              {Pred, Builder.getInt32(-1)});
  };

  switch (BuiltinID) {
  // These intrinsics return a tuple {Vector, VectorPred} in LLVM IR,
  // and the corresponding C/C++ builtins use loads/stores to update
  // the predicate.
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vaddcarry_128B:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry:
  case Hexagon::BI__builtin_HEXAGON_V6_vsubcarry_128B: {
    // Get the type from the 0-th argument.
    llvm::Type *VecType = ConvertType(E->getArg(0)->getType());
    Address PredAddr = Builder.CreateBitCast(
        EmitPointerWithAlignment(E->getArg(2)), VecType->getPointerTo(0));
    llvm::Value *PredIn = V2Q(Builder.CreateLoad(PredAddr));
    llvm::Value *Result = Builder.CreateCall(CGM.getIntrinsic(ID),
        {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1)), PredIn});

    llvm::Value *PredOut = Builder.CreateExtractValue(Result, 1);
    Builder.CreateAlignedStore(Q2V(PredOut), PredAddr.getPointer(),
        PredAddr.getAlignment());
    return Builder.CreateExtractValue(Result, 0);
  }

  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pci:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrub_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrb_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadruh_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrh_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadri_pcr:
  case Hexagon::BI__builtin_HEXAGON_L2_loadrd_pcr:
    return MakeCircOp(ID, /*IsLoad=*/true);
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pci:
  case Hexagon::BI__builtin_HEXAGON_S2_storerb_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storerh_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storerf_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storeri_pcr:
  case Hexagon::BI__builtin_HEXAGON_S2_storerd_pcr:
    return MakeCircOp(ID, /*IsLoad=*/false);
  case Hexagon::BI__builtin_brev_ldub:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrub_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_ldb:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrb_pbr, Int8Ty);
  case Hexagon::BI__builtin_brev_lduh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadruh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldh:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrh_pbr, Int16Ty);
  case Hexagon::BI__builtin_brev_ldw:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadri_pbr, Int32Ty);
  case Hexagon::BI__builtin_brev_ldd:
    return MakeBrevLd(Intrinsic::hexagon_L2_loadrd_pbr, Int64Ty);

  default: {
    if (ID == Intrinsic::not_intrinsic)
      return nullptr;

    auto IsVectorPredTy = [](llvm::Type *T) {
      return T->isVectorTy() &&
             cast<llvm::VectorType>(T)->getElementType()->isIntegerTy(1);
    };

    llvm::Function *IntrFn = CGM.getIntrinsic(ID);
    llvm::FunctionType *IntrTy = IntrFn->getFunctionType();
    SmallVector<llvm::Value*,4> Ops;
    for (unsigned i = 0, e = IntrTy->getNumParams(); i != e; ++i) {
      llvm::Type *T = IntrTy->getParamType(i);
      const Expr *A = E->getArg(i);
      if (IsVectorPredTy(T)) {
        // There will be an implicit cast to a boolean vector. Strip it.
        if (auto *Cast = dyn_cast<ImplicitCastExpr>(A)) {
          if (Cast->getCastKind() == CK_BitCast)
            A = Cast->getSubExpr();
        }
        Ops.push_back(V2Q(EmitScalarExpr(A)));
      } else {
        Ops.push_back(EmitScalarExpr(A));
      }
    }

    llvm::Value *Call = Builder.CreateCall(IntrFn, Ops);
    if (IsVectorPredTy(IntrTy->getReturnType()))
      Call = Q2V(Call);

    return Call;
  } // default
  } // switch

  return nullptr;
}

Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
                                             const CallExpr *E,
                                             ReturnValueSlot ReturnValue) {
  SmallVector<Value *, 4> Ops;
  llvm::Type *ResultType = ConvertType(E->getType());

  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
    Ops.push_back(EmitScalarExpr(E->getArg(i)));

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  unsigned NF = 1;
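  // Tail policy constant for the RVV builtins; referenced by the generated
  // vector builtin lowering code included below.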
  constexpr unsigned TAIL_UNDISTURBED = 0;

  // Required for overloaded intrinsics.
  llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
  switch (BuiltinID) {
  default: llvm_unreachable("unexpected builtin ID");
  case RISCV::BI__builtin_riscv_orc_b_32:
  case RISCV::BI__builtin_riscv_orc_b_64:
  case RISCV::BI__builtin_riscv_clmul:
  case RISCV::BI__builtin_riscv_clmulh:
  case RISCV::BI__builtin_riscv_clmulr:
  case RISCV::BI__builtin_riscv_bcompress_32:
  case RISCV::BI__builtin_riscv_bcompress_64:
  case RISCV::BI__builtin_riscv_bdecompress_32:
  case RISCV::BI__builtin_riscv_bdecompress_64:
  case RISCV::BI__builtin_riscv_grev_32:
  case RISCV::BI__builtin_riscv_grev_64:
  case RISCV::BI__builtin_riscv_gorc_32:
  case RISCV::BI__builtin_riscv_gorc_64:
  case RISCV::BI__builtin_riscv_shfl_32:
  case RISCV::BI__builtin_riscv_shfl_64:
  case RISCV::BI__builtin_riscv_unshfl_32:
  case RISCV::BI__builtin_riscv_unshfl_64:
  case RISCV::BI__builtin_riscv_xperm_n:
  case RISCV::BI__builtin_riscv_xperm_b:
  case RISCV::BI__builtin_riscv_xperm_h:
  case RISCV::BI__builtin_riscv_xperm_w:
  case RISCV::BI__builtin_riscv_crc32_b:
  case RISCV::BI__builtin_riscv_crc32_h:
  case RISCV::BI__builtin_riscv_crc32_w:
  case RISCV::BI__builtin_riscv_crc32_d:
  case RISCV::BI__builtin_riscv_crc32c_b:
  case RISCV::BI__builtin_riscv_crc32c_h:
  case RISCV::BI__builtin_riscv_crc32c_w:
  case RISCV::BI__builtin_riscv_crc32c_d: {
    switch (BuiltinID) {
    default: llvm_unreachable("unexpected builtin ID");
    // Zbb
    case RISCV::BI__builtin_riscv_orc_b_32:
    case RISCV::BI__builtin_riscv_orc_b_64:
      ID = Intrinsic::riscv_orc_b;
      break;

    // Zbc
    case RISCV::BI__builtin_riscv_clmul:
      ID = Intrinsic::riscv_clmul;
      break;
    case RISCV::BI__builtin_riscv_clmulh:
      ID = Intrinsic::riscv_clmulh;
      break;
    case RISCV::BI__builtin_riscv_clmulr:
      ID = Intrinsic::riscv_clmulr;
      break;

    // Zbe
    case RISCV::BI__builtin_riscv_bcompress_32:
    case RISCV::BI__builtin_riscv_bcompress_64:
      ID = Intrinsic::riscv_bcompress;
      break;
    case RISCV::BI__builtin_riscv_bdecompress_32:
    case RISCV::BI__builtin_riscv_bdecompress_64:
      ID = Intrinsic::riscv_bdecompress;
      break;

    // Zbp
    case RISCV::BI__builtin_riscv_grev_32:
    case RISCV::BI__builtin_riscv_grev_64:
      ID = Intrinsic::riscv_grev;
      break;
    case RISCV::BI__builtin_riscv_gorc_32:
    case RISCV::BI__builtin_riscv_gorc_64:
      ID = Intrinsic::riscv_gorc;
      break;
    case RISCV::BI__builtin_riscv_shfl_32:
    case RISCV::BI__builtin_riscv_shfl_64:
      ID = Intrinsic::riscv_shfl;
      break;
    case RISCV::BI__builtin_riscv_unshfl_32:
    case RISCV::BI__builtin_riscv_unshfl_64:
      ID = Intrinsic::riscv_unshfl;
      break;
    case RISCV::BI__builtin_riscv_xperm_n:
      ID = Intrinsic::riscv_xperm_n;
      break;
    case RISCV::BI__builtin_riscv_xperm_b:
      ID = Intrinsic::riscv_xperm_b;
      break;
    case RISCV::BI__builtin_riscv_xperm_h:
      ID = Intrinsic::riscv_xperm_h;
      break;
    case RISCV::BI__builtin_riscv_xperm_w:
      ID = Intrinsic::riscv_xperm_w;
      break;

    // Zbr
    case RISCV::BI__builtin_riscv_crc32_b:
      ID = Intrinsic::riscv_crc32_b;
      break;
    case RISCV::BI__builtin_riscv_crc32_h:
      ID = Intrinsic::riscv_crc32_h;
      break;
    case RISCV::BI__builtin_riscv_crc32_w:
      ID = Intrinsic::riscv_crc32_w;
      break;
    case RISCV::BI__builtin_riscv_crc32_d:
      ID = Intrinsic::riscv_crc32_d;
      break;
    case RISCV::BI__builtin_riscv_crc32c_b:
      ID = Intrinsic::riscv_crc32c_b;
      break;
    case RISCV::BI__builtin_riscv_crc32c_h:
      ID = Intrinsic::riscv_crc32c_h;
      break;
    case RISCV::BI__builtin_riscv_crc32c_w:
      ID = Intrinsic::riscv_crc32c_w;
      break;
    case RISCV::BI__builtin_riscv_crc32c_d:
      ID = Intrinsic::riscv_crc32c_d;
      break;
    }

    IntrinsicTypes = {ResultType};
    break;
  }
  // Vector builtins are handled by the generated code included here.
#include "clang/Basic/riscv_vector_builtin_cg.inc"
  }

  assert(ID != Intrinsic::not_intrinsic);

  llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
  return Builder.CreateCall(F, Ops);
}