//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>
using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old, replace it with new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsic matches below should be marked with which llvm
  // version started autoupgrading them. At some point in the future we would
  // like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
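  // For example, once "sse2.pcmpeq.b" (i.e. llvm.x86.sse2.pcmpeq.b; the
  // caller has already stripped the "llvm." and "x86." prefixes) matches
  // below, UpgradeIntrinsicCall expands calls to it into a plain compare
  // plus sign extension:
  //   %cmp = icmp eq <16 x i8> %a, %b
  //   %res = sext <16 x i1> %cmp to <16 x i8>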
  if (Name == "ssse3.pabs.b.128" || // Added in 6.0
      Name == "ssse3.pabs.w.128" || // Added in 6.0
      Name == "ssse3.pabs.d.128" || // Added in 6.0
      Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
      Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
      Name.startswith("avx512.kunpck") || // Added in 6.0
      Name.startswith("avx2.pabs.") || // Added in 6.0
      Name.startswith("avx512.mask.pabs.") || // Added in 6.0
      Name.startswith("avx512.broadcastm") || // Added in 6.0
      Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
      Name.startswith("sse2.pcmpeq.") || // Added in 3.1
      Name.startswith("sse2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx2.pcmpeq.") || // Added in 3.1
      Name.startswith("avx2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
      Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
      Name.startswith("avx.vperm2f128.") || // Added in 6.0
      Name == "avx2.vperm2i128" || // Added in 6.0
      Name == "sse.add.ss" || // Added in 4.0
      Name == "sse2.add.sd" || // Added in 4.0
      Name == "sse.sub.ss" || // Added in 4.0
      Name == "sse2.sub.sd" || // Added in 4.0
      Name == "sse.mul.ss" || // Added in 4.0
      Name == "sse2.mul.sd" || // Added in 4.0
      Name == "sse.div.ss" || // Added in 4.0
      Name == "sse2.div.sd" || // Added in 4.0
      Name == "sse41.pmaxsb" || // Added in 3.9
      Name == "sse2.pmaxs.w" || // Added in 3.9
      Name == "sse41.pmaxsd" || // Added in 3.9
      Name == "sse2.pmaxu.b" || // Added in 3.9
      Name == "sse41.pmaxuw" || // Added in 3.9
      Name == "sse41.pmaxud" || // Added in 3.9
      Name == "sse41.pminsb" || // Added in 3.9
      Name == "sse2.pmins.w" || // Added in 3.9
      Name == "sse41.pminsd" || // Added in 3.9
      Name == "sse2.pminu.b" || // Added in 3.9
      Name == "sse41.pminuw" || // Added in 3.9
      Name == "sse41.pminud" || // Added in 3.9
      Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
      Name.startswith("avx2.pmax") || // Added in 3.9
      Name.startswith("avx2.pmin") || // Added in 3.9
      Name.startswith("avx512.mask.pmax") || // Added in 4.0
      Name.startswith("avx512.mask.pmin") || // Added in 4.0
      Name.startswith("avx2.vbroadcast") || // Added in 3.8
      Name.startswith("avx2.pbroadcast") || // Added in 3.8
      Name.startswith("avx.vpermil.") || // Added in 3.1
      Name.startswith("sse2.pshuf") || // Added in 3.9
      Name.startswith("avx512.pbroadcast") || // Added in 3.9
      Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
      Name.startswith("avx512.mask.movddup") || // Added in 3.9
      Name.startswith("avx512.mask.movshdup") || // Added in 3.9
      Name.startswith("avx512.mask.movsldup") || // Added in 3.9
      Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
      Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
      Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
      Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
      Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
      Name.startswith("avx512.mask.punpckl") || // Added in 3.9
      Name.startswith("avx512.mask.punpckh") || // Added in 3.9
      Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
      Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
      Name.startswith("avx512.mask.pand.") || // Added in 3.9
      Name.startswith("avx512.mask.pandn.") || // Added in 3.9
Name.startswith("avx512.mask.por.") || // Added in 3.9 140 Name.startswith("avx512.mask.pxor.") || // Added in 3.9 141 Name.startswith("avx512.mask.and.") || // Added in 3.9 142 Name.startswith("avx512.mask.andn.") || // Added in 3.9 143 Name.startswith("avx512.mask.or.") || // Added in 3.9 144 Name.startswith("avx512.mask.xor.") || // Added in 3.9 145 Name.startswith("avx512.mask.padd.") || // Added in 4.0 146 Name.startswith("avx512.mask.psub.") || // Added in 4.0 147 Name.startswith("avx512.mask.pmull.") || // Added in 4.0 148 Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0 149 Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0 150 Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0 151 Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0 152 Name.startswith("avx512.mask.packsswb.") || // Added in 5.0 153 Name.startswith("avx512.mask.packssdw.") || // Added in 5.0 154 Name.startswith("avx512.mask.packuswb.") || // Added in 5.0 155 Name.startswith("avx512.mask.packusdw.") || // Added in 5.0 156 Name.startswith("avx512.mask.cmp.b") || // Added in 5.0 157 Name.startswith("avx512.mask.cmp.d") || // Added in 5.0 158 Name.startswith("avx512.mask.cmp.q") || // Added in 5.0 159 Name.startswith("avx512.mask.cmp.w") || // Added in 5.0 160 Name.startswith("avx512.mask.ucmp.") || // Added in 5.0 161 Name.startswith("avx512.cvtb2mask.") || // Added in 7.0 162 Name.startswith("avx512.cvtw2mask.") || // Added in 7.0 163 Name.startswith("avx512.cvtd2mask.") || // Added in 7.0 164 Name.startswith("avx512.cvtq2mask.") || // Added in 7.0 165 Name == "avx512.mask.add.pd.128" || // Added in 4.0 166 Name == "avx512.mask.add.pd.256" || // Added in 4.0 167 Name == "avx512.mask.add.ps.128" || // Added in 4.0 168 Name == "avx512.mask.add.ps.256" || // Added in 4.0 169 Name == "avx512.mask.div.pd.128" || // Added in 4.0 170 Name == "avx512.mask.div.pd.256" || // Added in 4.0 171 Name == "avx512.mask.div.ps.128" || // Added in 4.0 172 Name == "avx512.mask.div.ps.256" || // Added in 4.0 173 Name == "avx512.mask.mul.pd.128" || // Added in 4.0 174 Name == "avx512.mask.mul.pd.256" || // Added in 4.0 175 Name == "avx512.mask.mul.ps.128" || // Added in 4.0 176 Name == "avx512.mask.mul.ps.256" || // Added in 4.0 177 Name == "avx512.mask.sub.pd.128" || // Added in 4.0 178 Name == "avx512.mask.sub.pd.256" || // Added in 4.0 179 Name == "avx512.mask.sub.ps.128" || // Added in 4.0 180 Name == "avx512.mask.sub.ps.256" || // Added in 4.0 181 Name == "avx512.mask.max.pd.128" || // Added in 5.0 182 Name == "avx512.mask.max.pd.256" || // Added in 5.0 183 Name == "avx512.mask.max.ps.128" || // Added in 5.0 184 Name == "avx512.mask.max.ps.256" || // Added in 5.0 185 Name == "avx512.mask.min.pd.128" || // Added in 5.0 186 Name == "avx512.mask.min.pd.256" || // Added in 5.0 187 Name == "avx512.mask.min.ps.128" || // Added in 5.0 188 Name == "avx512.mask.min.ps.256" || // Added in 5.0 189 Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0 190 Name.startswith("avx512.mask.psll.d") || // Added in 4.0 191 Name.startswith("avx512.mask.psll.q") || // Added in 4.0 192 Name.startswith("avx512.mask.psll.w") || // Added in 4.0 193 Name.startswith("avx512.mask.psra.d") || // Added in 4.0 194 Name.startswith("avx512.mask.psra.q") || // Added in 4.0 195 Name.startswith("avx512.mask.psra.w") || // Added in 4.0 196 Name.startswith("avx512.mask.psrl.d") || // Added in 4.0 197 Name.startswith("avx512.mask.psrl.q") || // Added in 4.0 198 Name.startswith("avx512.mask.psrl.w") || // Added in 4.0 199 
Name.startswith("avx512.mask.pslli") || // Added in 4.0 200 Name.startswith("avx512.mask.psrai") || // Added in 4.0 201 Name.startswith("avx512.mask.psrli") || // Added in 4.0 202 Name.startswith("avx512.mask.psllv") || // Added in 4.0 203 Name.startswith("avx512.mask.psrav") || // Added in 4.0 204 Name.startswith("avx512.mask.psrlv") || // Added in 4.0 205 Name.startswith("sse41.pmovsx") || // Added in 3.8 206 Name.startswith("sse41.pmovzx") || // Added in 3.9 207 Name.startswith("avx2.pmovsx") || // Added in 3.9 208 Name.startswith("avx2.pmovzx") || // Added in 3.9 209 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0 210 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0 211 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0 212 Name == "sse2.cvtdq2pd" || // Added in 3.9 213 Name == "sse2.cvtps2pd" || // Added in 3.9 214 Name == "avx.cvtdq2.pd.256" || // Added in 3.9 215 Name == "avx.cvt.ps2.pd.256" || // Added in 3.9 216 Name.startswith("avx.vinsertf128.") || // Added in 3.7 217 Name == "avx2.vinserti128" || // Added in 3.7 218 Name.startswith("avx512.mask.insert") || // Added in 4.0 219 Name.startswith("avx.vextractf128.") || // Added in 3.7 220 Name == "avx2.vextracti128" || // Added in 3.7 221 Name.startswith("avx512.mask.vextract") || // Added in 4.0 222 Name.startswith("sse4a.movnt.") || // Added in 3.9 223 Name.startswith("avx.movnt.") || // Added in 3.2 224 Name.startswith("avx512.storent.") || // Added in 3.9 225 Name == "sse41.movntdqa" || // Added in 5.0 226 Name == "avx2.movntdqa" || // Added in 5.0 227 Name == "avx512.movntdqa" || // Added in 5.0 228 Name == "sse2.storel.dq" || // Added in 3.9 229 Name.startswith("sse.storeu.") || // Added in 3.9 230 Name.startswith("sse2.storeu.") || // Added in 3.9 231 Name.startswith("avx.storeu.") || // Added in 3.9 232 Name.startswith("avx512.mask.storeu.") || // Added in 3.9 233 Name.startswith("avx512.mask.store.p") || // Added in 3.9 234 Name.startswith("avx512.mask.store.b.") || // Added in 3.9 235 Name.startswith("avx512.mask.store.w.") || // Added in 3.9 236 Name.startswith("avx512.mask.store.d.") || // Added in 3.9 237 Name.startswith("avx512.mask.store.q.") || // Added in 3.9 238 Name.startswith("avx512.mask.loadu.") || // Added in 3.9 239 Name.startswith("avx512.mask.load.") || // Added in 3.9 240 Name == "sse42.crc32.64.8" || // Added in 3.4 241 Name.startswith("avx.vbroadcast.s") || // Added in 3.5 242 Name.startswith("avx512.mask.palignr.") || // Added in 3.9 243 Name.startswith("avx512.mask.valign.") || // Added in 4.0 244 Name.startswith("sse2.psll.dq") || // Added in 3.7 245 Name.startswith("sse2.psrl.dq") || // Added in 3.7 246 Name.startswith("avx2.psll.dq") || // Added in 3.7 247 Name.startswith("avx2.psrl.dq") || // Added in 3.7 248 Name.startswith("avx512.psll.dq") || // Added in 3.9 249 Name.startswith("avx512.psrl.dq") || // Added in 3.9 250 Name == "sse41.pblendw" || // Added in 3.7 251 Name.startswith("sse41.blendp") || // Added in 3.7 252 Name.startswith("avx.blend.p") || // Added in 3.7 253 Name == "avx2.pblendw" || // Added in 3.7 254 Name.startswith("avx2.pblendd.") || // Added in 3.7 255 Name.startswith("avx.vbroadcastf128") || // Added in 4.0 256 Name == "avx2.vbroadcasti128" || // Added in 3.7 257 Name.startswith("avx512.mask.broadcastf") || // Added in 6.0 258 Name.startswith("avx512.mask.broadcasti") || // Added in 6.0 259 Name == "xop.vpcmov" || // Added in 3.8 260 Name == "xop.vpcmov.256" || // Added in 5.0 261 Name.startswith("avx512.mask.move.s") || // Added in 4.0 262 
Name.startswith("avx512.cvtmask2") || // Added in 5.0 263 (Name.startswith("xop.vpcom") && // Added in 3.2 264 F->arg_size() == 2) || 265 Name.startswith("avx512.ptestm") || //Added in 6.0 266 Name.startswith("avx512.ptestnm") || //Added in 6.0 267 Name.startswith("sse2.pavg") || // Added in 6.0 268 Name.startswith("avx2.pavg") || // Added in 6.0 269 Name.startswith("avx512.mask.pavg")) // Added in 6.0 270 return true; 271 272 return false; 273 } 274 275 static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name, 276 Function *&NewFn) { 277 // Only handle intrinsics that start with "x86.". 278 if (!Name.startswith("x86.")) 279 return false; 280 // Remove "x86." prefix. 281 Name = Name.substr(4); 282 283 if (ShouldUpgradeX86Intrinsic(F, Name)) { 284 NewFn = nullptr; 285 return true; 286 } 287 288 // SSE4.1 ptest functions may have an old signature. 289 if (Name.startswith("sse41.ptest")) { // Added in 3.2 290 if (Name.substr(11) == "c") 291 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn); 292 if (Name.substr(11) == "z") 293 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn); 294 if (Name.substr(11) == "nzc") 295 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn); 296 } 297 // Several blend and other instructions with masks used the wrong number of 298 // bits. 299 if (Name == "sse41.insertps") // Added in 3.6 300 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps, 301 NewFn); 302 if (Name == "sse41.dppd") // Added in 3.6 303 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd, 304 NewFn); 305 if (Name == "sse41.dpps") // Added in 3.6 306 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps, 307 NewFn); 308 if (Name == "sse41.mpsadbw") // Added in 3.6 309 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw, 310 NewFn); 311 if (Name == "avx.dp.ps.256") // Added in 3.6 312 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256, 313 NewFn); 314 if (Name == "avx2.mpsadbw") // Added in 3.6 315 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw, 316 NewFn); 317 318 // frcz.ss/sd may need to have an argument dropped. Added in 3.2 319 if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) { 320 rename(F); 321 NewFn = Intrinsic::getDeclaration(F->getParent(), 322 Intrinsic::x86_xop_vfrcz_ss); 323 return true; 324 } 325 if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) { 326 rename(F); 327 NewFn = Intrinsic::getDeclaration(F->getParent(), 328 Intrinsic::x86_xop_vfrcz_sd); 329 return true; 330 } 331 // Upgrade any XOP PERMIL2 index operand still using a float/double vector. 
  if (Name.startswith("xop.vpermil2")) { // Added in 3.9
    auto Idx = F->getFunctionType()->getParamType(2);
    if (Idx->isFPOrFPVectorTy()) {
      rename(F);
      unsigned IdxSize = Idx->getPrimitiveSizeInBits();
      unsigned EltSize = Idx->getScalarSizeInBits();
      Intrinsic::ID Permil2ID;
      if (EltSize == 64 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd;
      else if (EltSize == 32 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2ps;
      else if (EltSize == 64 && IdxSize == 256)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
      else
        Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
      NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
      return true;
    }
  }

  return false;
}

static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vclz")) {
      Type* args[2] = {
        F->arg_begin()->getType(),
        Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(),
                               "llvm."
+ Name + ".p0i8", F->getParent()); 399 return true; 400 } 401 Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$"); 402 if (vstRegex.match(Name)) { 403 static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1, 404 Intrinsic::arm_neon_vst2, 405 Intrinsic::arm_neon_vst3, 406 Intrinsic::arm_neon_vst4}; 407 408 static const Intrinsic::ID StoreLaneInts[] = { 409 Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane, 410 Intrinsic::arm_neon_vst4lane 411 }; 412 413 auto fArgs = F->getFunctionType()->params(); 414 Type *Tys[] = {fArgs[0], fArgs[1]}; 415 if (Name.find("lane") == StringRef::npos) 416 NewFn = Intrinsic::getDeclaration(F->getParent(), 417 StoreInts[fArgs.size() - 3], Tys); 418 else 419 NewFn = Intrinsic::getDeclaration(F->getParent(), 420 StoreLaneInts[fArgs.size() - 5], Tys); 421 return true; 422 } 423 if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") { 424 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer); 425 return true; 426 } 427 break; 428 } 429 430 case 'c': { 431 if (Name.startswith("ctlz.") && F->arg_size() == 1) { 432 rename(F); 433 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, 434 F->arg_begin()->getType()); 435 return true; 436 } 437 if (Name.startswith("cttz.") && F->arg_size() == 1) { 438 rename(F); 439 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz, 440 F->arg_begin()->getType()); 441 return true; 442 } 443 break; 444 } 445 case 'd': { 446 if (Name == "dbg.value" && F->arg_size() == 4) { 447 rename(F); 448 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value); 449 return true; 450 } 451 break; 452 } 453 case 'i': 454 case 'l': { 455 bool IsLifetimeStart = Name.startswith("lifetime.start"); 456 if (IsLifetimeStart || Name.startswith("invariant.start")) { 457 Intrinsic::ID ID = IsLifetimeStart ? 458 Intrinsic::lifetime_start : Intrinsic::invariant_start; 459 auto Args = F->getFunctionType()->params(); 460 Type* ObjectPtr[1] = {Args[1]}; 461 if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) { 462 rename(F); 463 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr); 464 return true; 465 } 466 } 467 468 bool IsLifetimeEnd = Name.startswith("lifetime.end"); 469 if (IsLifetimeEnd || Name.startswith("invariant.end")) { 470 Intrinsic::ID ID = IsLifetimeEnd ? 471 Intrinsic::lifetime_end : Intrinsic::invariant_end; 472 473 auto Args = F->getFunctionType()->params(); 474 Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 
                                 1 : 2]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }
    break;
  }
  case 'm': {
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    // Renaming gather/scatter intrinsics with no address space overloading
    // to the new overload which includes an address space
    if (Name.startswith("masked.gather.")) {
      Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_gather, Tys);
        return true;
      }
    }
    if (Name.startswith("masked.scatter.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = {Args[0], Args[1]};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_scatter, Tys);
        return true;
      }
    }
    // Updating the memory intrinsics (memcpy/memmove/memset) that have an
    // alignment parameter to embedding the alignment as an attribute of
    // the pointer args.
    if (Name.startswith("memcpy.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memmove.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest and len
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
  case 'n': {
    if (Name.startswith("nvvm.")) {
      Name = Name.substr(5);

      // The following nvvm intrinsics correspond exactly to an LLVM
      // intrinsic.
      Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
                              .Cases("brev32", "brev64", Intrinsic::bitreverse)
                              .Case("clz.i", Intrinsic::ctlz)
                              .Case("popc.i", Intrinsic::ctpop)
                              .Default(Intrinsic::not_intrinsic);
      if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                          {F->getReturnType()});
        return true;
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom,
      // but not to an intrinsic alone.
      // We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = StringSwitch<bool>(Name)
                        .Cases("abs.i", "abs.ll", true)
                        .Cases("clz.ll", "popc.ll", "h2f", true)
                        .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
                        .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
                        .Default(false);
      if (Expand) {
        NewFn = nullptr;
        return true;
      }
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->arg_size() == 2 ||
          F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::objectsize, Tys);
        return true;
      }
    }
    break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x':
    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }
  // Remangle our intrinsic since we upgrade the mangling
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != None) {
    NewFn = Result.getValue();
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  // Nothing to do yet.
  return false;
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
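// For example, a 4-byte PSRLDQ of a 128-bit vector becomes a <16 x i8>
// shuffle that selects bytes 4..15 of the input followed by four zero bytes
// taken from the all-zero second shuffle operand.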
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
                     cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}

static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just return the first (unmasked) operand.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the
// immediate, so we need to handle both cases. VALIGN also doesn't have
// 128-bit lanes.
static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
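  // For example, shifting the 32-byte concatenation [Op0:Op1] right by 20
  // bytes gives the same bytes as shifting [Zero:Op0] right by 4 bytes.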
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that case.
  for (unsigned l = 0; l < NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  unsigned Align =
      Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Passthru->getType()));
  unsigned Align =
      Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
}

static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  llvm::Type *Ty = Op0->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
  Value *Neg = Builder.CreateNeg(Op0);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);

  if (CI.getNumArgOperands() == 3)
    Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));

  return Res;
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

// Apply a mask to a vector of i1s and make sure the result is at least 8 bits
// wide.
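// For a <4 x i1> input, the vector is zero-padded to <8 x i1> before the
// final bitcast so the result is a legal i8 rather than an i4.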
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask, unsigned NumElts) {
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Vec = Builder.CreateShuffleVector(Vec,
                                      Constant::getNullValue(Vec->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
}

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   unsigned CC, bool Signed) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(llvm::VectorType::get(Builder.getInt1Ty(),
                                                       NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(llvm::VectorType::get(Builder.getInt1Ty(),
                                                          NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ; break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE; break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
  }

  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);

  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask, NumElts);
}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
                                    Intrinsic::ID IID) {
  Function *F = CI.getCalledFunction();
  Function *Intrin = Intrinsic::getDeclaration(F->getParent(), IID);
  Value *Rep = Builder.CreateCall(Intrin,
                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}

static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value* A = CI.getArgOperand(0);
  Value* B = CI.getArgOperand(1);
  Value* Src = CI.getArgOperand(2);
  Value* Mask = CI.getArgOperand(3);

  Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
  Value* Cmp = Builder.CreateIsNotNull(AndNode);
  Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
  Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
  Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}


static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
  Value* Op = CI.getArgOperand(0);
  Type* ReturnOp = CI.getType();
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
}

/// Upgrade a call to an old intrinsic. All argument and return casting must be
/// provided to seamlessly integrate with existing context.
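///
/// When NewFn is null the call is expanded inline into native IR (loads,
/// stores, shuffles, compares, etc.); otherwise the call is rewritten to
/// target NewFn.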
void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
  Function *F = CI->getCalledFunction();
  LLVMContext &C = CI->getContext();
  IRBuilder<> Builder(C);
  Builder.SetInsertPoint(CI->getParent(), CI->getIterator());

  assert(F && "Intrinsic call is not direct?");

  if (!NewFn) {
    // Get the Function's name.
    StringRef Name = F->getName();

    assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'");
    Name = Name.substr(5);

    bool IsX86 = Name.startswith("x86.");
    if (IsX86)
      Name = Name.substr(4);
    bool IsNVVM = Name.startswith("nvvm.");
    if (IsNVVM)
      Name = Name.substr(5);

    if (IsX86 && Name.startswith("sse4a.movnt.")) {
      Module *M = F->getParent();
      SmallVector<Metadata *, 1> Elts;
      Elts.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
      MDNode *Node = MDNode::get(C, Elts);

      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);

      // Nontemporal (unaligned) store of the 0'th element of the float/double
      // vector.
      Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType();
      PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy);
      Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast");
      Value *Extract =
          Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement");

      StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1);
      SI->setMetadata(M->getMDKindID("nontemporal"), Node);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    }

    if (IsX86 && (Name.startswith("avx.movnt.") ||
                  Name.startswith("avx512.storent."))) {
      Module *M = F->getParent();
      SmallVector<Metadata *, 1> Elts;
      Elts.push_back(
          ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));
      MDNode *Node = MDNode::get(C, Elts);

      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);

      // Convert the type of the pointer to a pointer to the stored type.
      Value *BC = Builder.CreateBitCast(Arg0,
                                        PointerType::getUnqual(Arg1->getType()),
                                        "cast");
      VectorType *VTy = cast<VectorType>(Arg1->getType());
      StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC,
                                                 VTy->getBitWidth() / 8);
      SI->setMetadata(M->getMDKindID("nontemporal"), Node);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    }

    if (IsX86 && Name == "sse2.storel.dq") {
      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);

      Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);
      Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");
      Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0);
      Value *BC = Builder.CreateBitCast(Arg0,
                                        PointerType::getUnqual(Elt->getType()),
                                        "cast");
      Builder.CreateAlignedStore(Elt, BC, 1);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    }

    if (IsX86 && (Name.startswith("sse.storeu.") ||
                  Name.startswith("sse2.storeu.") ||
                  Name.startswith("avx.storeu."))) {
      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);

      Arg0 = Builder.CreateBitCast(Arg0,
                                   PointerType::getUnqual(Arg1->getType()),
                                   "cast");
      Builder.CreateAlignedStore(Arg1, Arg0, 1);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    }

    if (IsX86 && (Name.startswith("avx512.mask.store"))) {
      // "avx512.mask.storeu." or "avx512.mask.store."
      bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
      UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
                         CI->getArgOperand(2), Aligned);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    }

    Value *Rep;
    // Upgrade packed integer vector compare intrinsics to compare
    // instructions.
    if (IsX86 && (Name.startswith("sse2.pcmp") ||
                  Name.startswith("avx2.pcmp"))) {
      // "sse2.pcmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
      bool CmpEq = Name[9] == 'e';
      Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
                               CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = Builder.CreateSExt(Rep, CI->getType(), "");
    } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
      Type *ExtTy = Type::getInt32Ty(C);
      if (CI->getOperand(0)->getType()->isIntegerTy(8))
        ExtTy = Type::getInt64Ty(C);
      unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
                         ExtTy->getPrimitiveSizeInBits();
      Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
      Rep = Builder.CreateVectorSplat(NumElts, Rep);
    } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
                         Name.startswith("avx512.ptestnm"))) {
      Value *Op0 = CI->getArgOperand(0);
      Value *Op1 = CI->getArgOperand(1);
      Value *Mask = CI->getArgOperand(2);
      Rep = Builder.CreateAnd(Op0, Op1);
      llvm::Type *Ty = Op0->getType();
      Value *Zero = llvm::Constant::getNullValue(Ty);
      ICmpInst::Predicate Pred =
          Name.startswith("avx512.ptestm") ? ICmpInst::ICMP_NE
                                           : ICmpInst::ICMP_EQ;
      Rep = Builder.CreateICmp(Pred, Rep, Zero);
      unsigned NumElts = Op0->getType()->getVectorNumElements();
      Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask, NumElts);
    } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))) {
      unsigned NumElts =
          CI->getArgOperand(1)->getType()->getVectorNumElements();
      Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                          CI->getArgOperand(1));
    } else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
      unsigned NumElts = CI->getType()->getScalarSizeInBits();
      Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
      Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
      uint32_t Indices[64];
      for (unsigned i = 0; i != NumElts; ++i)
        Indices[i] = i;

      // First extract half of each vector. This gives better codegen than
      // doing it in a single shuffle.
      LHS = Builder.CreateShuffleVector(LHS, LHS,
                                        makeArrayRef(Indices, NumElts / 2));
      RHS = Builder.CreateShuffleVector(RHS, RHS,
                                        makeArrayRef(Indices, NumElts / 2));
      // Concat the vectors.
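      // The low half of the result comes from the first operand's low bits
      // and the high half from the second operand's low bits.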
      Rep = Builder.CreateShuffleVector(LHS, RHS,
                                        makeArrayRef(Indices, NumElts));
      Rep = Builder.CreateBitCast(Rep, CI->getType());
    } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd")) {
      Type *I32Ty = Type::getInt32Ty(C);
      Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
                                                 ConstantInt::get(I32Ty, 0));
      Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
                                                 ConstantInt::get(I32Ty, 0));
      Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
                                        Builder.CreateFAdd(Elt0, Elt1),
                                        ConstantInt::get(I32Ty, 0));
    } else if (IsX86 && (Name == "sse.sub.ss" || Name == "sse2.sub.sd")) {
      Type *I32Ty = Type::getInt32Ty(C);
      Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
                                                 ConstantInt::get(I32Ty, 0));
      Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
                                                 ConstantInt::get(I32Ty, 0));
      Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
                                        Builder.CreateFSub(Elt0, Elt1),
                                        ConstantInt::get(I32Ty, 0));
    } else if (IsX86 && (Name == "sse.mul.ss" || Name == "sse2.mul.sd")) {
      Type *I32Ty = Type::getInt32Ty(C);
      Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
                                                 ConstantInt::get(I32Ty, 0));
      Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
                                                 ConstantInt::get(I32Ty, 0));
      Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
                                        Builder.CreateFMul(Elt0, Elt1),
                                        ConstantInt::get(I32Ty, 0));
    } else if (IsX86 && (Name == "sse.div.ss" || Name == "sse2.div.sd")) {
      Type *I32Ty = Type::getInt32Ty(C);
      Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0),
                                                 ConstantInt::get(I32Ty, 0));
      Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1),
                                                 ConstantInt::get(I32Ty, 0));
      Rep = Builder.CreateInsertElement(CI->getArgOperand(0),
                                        Builder.CreateFDiv(Elt0, Elt1),
                                        ConstantInt::get(I32Ty, 0));
    } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) {
      // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt."
      bool CmpEq = Name[16] == 'e';
      Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ?
                                 0 : 6, true);
    } else if (IsX86 && Name.startswith("avx512.mask.cmp")) {
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
      Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
    } else if (IsX86 && Name.startswith("avx512.mask.ucmp")) {
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
      Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
    } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") ||
                         Name.startswith("avx512.cvtw2mask.") ||
                         Name.startswith("avx512.cvtd2mask.") ||
                         Name.startswith("avx512.cvtq2mask."))) {
      Value *Op = CI->getArgOperand(0);
      Value *Zero = llvm::Constant::getNullValue(Op->getType());
      Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
      Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr,
                                   Op->getType()->getVectorNumElements());
    } else if (IsX86 && (Name == "ssse3.pabs.b.128" ||
                         Name == "ssse3.pabs.w.128" ||
                         Name == "ssse3.pabs.d.128" ||
                         Name.startswith("avx2.pabs") ||
                         Name.startswith("avx512.mask.pabs"))) {
      Rep = upgradeAbs(Builder, *CI);
    } else if (IsX86 && (Name == "sse41.pmaxsb" ||
                         Name == "sse2.pmaxs.w" ||
                         Name == "sse41.pmaxsd" ||
                         Name.startswith("avx2.pmaxs") ||
                         Name.startswith("avx512.mask.pmaxs"))) {
      Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT);
    } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
                         Name == "sse41.pmaxuw" ||
                         Name == "sse41.pmaxud" ||
                         Name.startswith("avx2.pmaxu") ||
                         Name.startswith("avx512.mask.pmaxu"))) {
      Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT);
    } else if (IsX86 && (Name == "sse41.pminsb" ||
                         Name == "sse2.pmins.w" ||
                         Name == "sse41.pminsd" ||
                         Name.startswith("avx2.pmins") ||
                         Name.startswith("avx512.mask.pmins"))) {
      Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT);
    } else if (IsX86 && (Name == "sse2.pminu.b" ||
                         Name == "sse41.pminuw" ||
                         Name == "sse41.pminud" ||
                         Name.startswith("avx2.pminu") ||
                         Name.startswith("avx512.mask.pminu"))) {
      Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT);
    } else if (IsX86 && (Name == "sse2.cvtdq2pd" ||
                         Name == "sse2.cvtps2pd" ||
                         Name == "avx.cvtdq2.pd.256" ||
                         Name == "avx.cvt.ps2.pd.256" ||
                         Name.startswith("avx512.mask.cvtdq2pd.") ||
                         Name.startswith("avx512.mask.cvtudq2pd."))) {
      // Lossless i32/float to double conversion.
      // Extract the bottom elements if necessary and convert to double vector.
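      // For example, sse2.cvtdq2pd takes a <4 x i32> but only widens the low
      // two elements, so the shuffle below first extracts a <2 x i32> before
      // the sitofp to <2 x double>.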
      Value *Src = CI->getArgOperand(0);
      VectorType *SrcTy = cast<VectorType>(Src->getType());
      VectorType *DstTy = cast<VectorType>(CI->getType());
      Rep = CI->getArgOperand(0);

      unsigned NumDstElts = DstTy->getNumElements();
      if (NumDstElts < SrcTy->getNumElements()) {
        assert(NumDstElts == 2 && "Unexpected vector size");
        uint32_t ShuffleMask[2] = { 0, 1 };
        Rep = Builder.CreateShuffleVector(Rep, UndefValue::get(SrcTy),
                                          ShuffleMask);
      }

      bool SInt2Double = (StringRef::npos != Name.find("cvtdq2"));
      bool UInt2Double = (StringRef::npos != Name.find("cvtudq2"));
      if (SInt2Double)
        Rep = Builder.CreateSIToFP(Rep, DstTy, "cvtdq2pd");
      else if (UInt2Double)
        Rep = Builder.CreateUIToFP(Rep, DstTy, "cvtudq2pd");
      else
        Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd");

      if (CI->getNumArgOperands() == 3)
        Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                            CI->getArgOperand(1));
    } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) {
      Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
                              CI->getArgOperand(1), CI->getArgOperand(2),
                              /*Aligned*/false);
    } else if (IsX86 && (Name.startswith("avx512.mask.load."))) {
      Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0),
                              CI->getArgOperand(1), CI->getArgOperand(2),
                              /*Aligned*/true);
    } else if (IsX86 && Name.startswith("xop.vpcom")) {
      Intrinsic::ID intID;
      if (Name.endswith("ub"))
        intID = Intrinsic::x86_xop_vpcomub;
      else if (Name.endswith("uw"))
        intID = Intrinsic::x86_xop_vpcomuw;
      else if (Name.endswith("ud"))
        intID = Intrinsic::x86_xop_vpcomud;
      else if (Name.endswith("uq"))
        intID = Intrinsic::x86_xop_vpcomuq;
      else if (Name.endswith("b"))
        intID = Intrinsic::x86_xop_vpcomb;
      else if (Name.endswith("w"))
        intID = Intrinsic::x86_xop_vpcomw;
      else if (Name.endswith("d"))
        intID = Intrinsic::x86_xop_vpcomd;
      else if (Name.endswith("q"))
        intID = Intrinsic::x86_xop_vpcomq;
      else
        llvm_unreachable("Unknown suffix");

      Name = Name.substr(9); // strip off "xop.vpcom"
      unsigned Imm;
      if (Name.startswith("lt"))
        Imm = 0;
      else if (Name.startswith("le"))
        Imm = 1;
      else if (Name.startswith("gt"))
        Imm = 2;
      else if (Name.startswith("ge"))
        Imm = 3;
      else if (Name.startswith("eq"))
        Imm = 4;
      else if (Name.startswith("ne"))
        Imm = 5;
      else if (Name.startswith("false"))
        Imm = 6;
      else if (Name.startswith("true"))
        Imm = 7;
      else
        llvm_unreachable("Unknown condition");

      Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID);
      Rep =
          Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1),
                                     Builder.getInt8(Imm)});
    } else if (IsX86 && Name.startswith("xop.vpcmov")) {
      Value *Sel = CI->getArgOperand(2);
      Value *NotSel = Builder.CreateNot(Sel);
      Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel);
      Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel);
      Rep = Builder.CreateOr(Sel0, Sel1);
    } else if (IsX86 && Name == "sse42.crc32.64.8") {
      Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
                                               Intrinsic::x86_sse42_crc32_32_8);
      Value *Trunc0 =
          Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
      Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
      Rep = Builder.CreateZExt(Rep, CI->getType(), "");
    } else if
               (IsX86 && Name.startswith("avx.vbroadcast.s")) {
      // Replace broadcasts with a series of insertelements.
      Type *VecTy = CI->getType();
      Type *EltTy = VecTy->getVectorElementType();
      unsigned EltNum = VecTy->getVectorNumElements();
      Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
                                          EltTy->getPointerTo());
      Value *Load = Builder.CreateLoad(EltTy, Cast);
      Type *I32Ty = Type::getInt32Ty(C);
      Rep = UndefValue::get(VecTy);
      for (unsigned I = 0; I < EltNum; ++I)
        Rep = Builder.CreateInsertElement(Rep, Load,
                                          ConstantInt::get(I32Ty, I));
    } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
                         Name.startswith("sse41.pmovzx") ||
                         Name.startswith("avx2.pmovsx") ||
                         Name.startswith("avx2.pmovzx") ||
                         Name.startswith("avx512.mask.pmovsx") ||
                         Name.startswith("avx512.mask.pmovzx"))) {
      VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
      VectorType *DstTy = cast<VectorType>(CI->getType());
      unsigned NumDstElts = DstTy->getNumElements();

      // Extract a subvector of the first NumDstElts lanes and sign/zero
      // extend.
      SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
      for (unsigned i = 0; i != NumDstElts; ++i)
        ShuffleMask[i] = i;

      Value *SV = Builder.CreateShuffleVector(
          CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask);

      bool DoSext = (StringRef::npos != Name.find("pmovsx"));
      Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
                   : Builder.CreateZExt(SV, DstTy);
      // If there are 3 arguments, it's a masked intrinsic so we need a select.
      if (CI->getNumArgOperands() == 3)
        Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                            CI->getArgOperand(1));
    } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
                         Name == "avx2.vbroadcasti128")) {
      // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
      Type *EltTy = CI->getType()->getVectorElementType();
      unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
      Type *VT = VectorType::get(EltTy, NumSrcElts);
      Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
                                            PointerType::getUnqual(VT));
      Value *Load = Builder.CreateAlignedLoad(Op, 1);
      if (NumSrcElts == 2)
        Rep = Builder.CreateShuffleVector(Load,
                                          UndefValue::get(Load->getType()),
                                          { 0, 1, 0, 1 });
      else
        Rep = Builder.CreateShuffleVector(Load,
                                          UndefValue::get(Load->getType()),
                                          { 0, 1, 2, 3, 0, 1, 2, 3 });
    } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
                         Name.startswith("avx512.mask.shuf.f"))) {
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
      Type *VT = CI->getType();
      unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
      unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
      unsigned ControlBitsMask = NumLanes - 1;
      unsigned NumControlBits = NumLanes / 2;
      SmallVector<uint32_t, 8> ShuffleMask(0);

      for (unsigned l = 0; l != NumLanes; ++l) {
        unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
        // We actually need the other source.
        if (l >= NumLanes / 2)
          LaneMask += NumLanes;
        for (unsigned i = 0; i != NumElementsInLane; ++i)
          ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
      }
      Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
                                        CI->getArgOperand(1), ShuffleMask);
      Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
                          CI->getArgOperand(3));
    } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
                         Name.startswith("avx512.mask.broadcasti"))) {
      unsigned NumSrcElts =
          CI->getArgOperand(0)->getType()->getVectorNumElements();
      unsigned NumDstElts = CI->getType()->getVectorNumElements();

      SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
      for (unsigned i = 0; i != NumDstElts; ++i)
        ShuffleMask[i] = i % NumSrcElts;

      Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
                                        CI->getArgOperand(0),
                                        ShuffleMask);
      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                          CI->getArgOperand(1));
    } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
                         Name.startswith("avx2.vbroadcast") ||
                         Name.startswith("avx512.pbroadcast") ||
                         Name.startswith("avx512.mask.broadcast.s"))) {
      // Replace vp?broadcasts with a vector shuffle.
      Value *Op = CI->getArgOperand(0);
      unsigned NumElts = CI->getType()->getVectorNumElements();
      Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
      Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
                                        Constant::getNullValue(MaskTy));

      if (CI->getNumArgOperands() == 3)
        Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                            CI->getArgOperand(1));
    } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
      Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
                                      CI->getArgOperand(1),
                                      CI->getArgOperand(2),
                                      CI->getArgOperand(3),
                                      CI->getArgOperand(4),
                                      false);
    } else if (IsX86 && Name.startswith("avx512.mask.valign.")) {
      Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
                                      CI->getArgOperand(1),
                                      CI->getArgOperand(2),
                                      CI->getArgOperand(3),
                                      CI->getArgOperand(4),
                                      true);
    } else if (IsX86 && (Name == "sse2.psll.dq" ||
                         Name == "avx2.psll.dq")) {
      // 128/256-bit shift left specified in bits.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
                                       Shift / 8); // Shift is in bits.
    } else if (IsX86 && (Name == "sse2.psrl.dq" ||
                         Name == "avx2.psrl.dq")) {
      // 128/256-bit shift right specified in bits.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
                                       Shift / 8); // Shift is in bits.
    } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
                         Name == "avx2.psll.dq.bs" ||
                         Name == "avx512.psll.dq.512")) {
      // 128/256/512-bit shift left specified in bytes.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
    } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
                         Name == "avx2.psrl.dq.bs" ||
                         Name == "avx512.psrl.dq.512")) {
      // 128/256/512-bit shift right specified in bytes.
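      // Unlike the bit variants above, the immediate is already a byte
      // count, so it is passed through without dividing by 8.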
    unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
  } else if (IsX86 && (Name == "sse41.pblendw" ||
                       Name.startswith("sse41.blendp") ||
                       Name.startswith("avx.blend.p") ||
                       Name == "avx2.pblendw" ||
                       Name.startswith("avx2.pblendd."))) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    VectorType *VecTy = cast<VectorType>(CI->getType());
    unsigned NumElts = VecTy->getNumElements();

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned i = 0; i != NumElts; ++i)
      Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;

    Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
  } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
                       Name == "avx2.vinserti128" ||
                       Name.startswith("avx512.mask.insert"))) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    unsigned DstNumElts = CI->getType()->getVectorNumElements();
    unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
    unsigned Scale = DstNumElts / SrcNumElts;

    // Mask off the high bits of the immediate value; hardware ignores those.
    Imm = Imm % Scale;

    // Extend the second operand into a vector the size of the destination.
    Value *UndefV = UndefValue::get(Op1->getType());
    SmallVector<uint32_t, 8> Idxs(DstNumElts);
    for (unsigned i = 0; i != SrcNumElts; ++i)
      Idxs[i] = i;
    for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
      Idxs[i] = SrcNumElts;
    Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);

    // Insert the second operand into the first operand.

    // Note that there is no guarantee that instruction lowering will actually
    // produce a vinsertf128 instruction for the created shuffles. In
    // particular, the 0 immediate case involves no lane changes, so it can
    // be handled as a blend.

    // Example of shuffle mask for 32-bit elements:
    // Imm = 1  <i32 0, i32 1, i32 2,  i32 3,  i32 8, i32 9, i32 10, i32 11>
    // Imm = 0  <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6,  i32 7>

    // First fill with identity mask.
    for (unsigned i = 0; i != DstNumElts; ++i)
      Idxs[i] = i;
    // Then replace the elements where we need to insert.
    for (unsigned i = 0; i != SrcNumElts; ++i)
      Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
    Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);

    // If the intrinsic has a mask operand, handle that.
    if (CI->getNumArgOperands() == 5)
      Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
                          CI->getArgOperand(3));
  } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
                       Name == "avx2.vextracti128" ||
                       Name.startswith("avx512.mask.vextract"))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    unsigned DstNumElts = CI->getType()->getVectorNumElements();
    unsigned SrcNumElts = Op0->getType()->getVectorNumElements();
    unsigned Scale = SrcNumElts / DstNumElts;

    // Mask off the high bits of the immediate value; hardware ignores those.
    Imm = Imm % Scale;

    // Get indexes for the subvector of the input vector.
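    // e.g. extracting the upper half (Imm = 1) of a v8i32 source into a
    // v4i32 result uses the mask <4, 5, 6, 7>.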
    SmallVector<uint32_t, 8> Idxs(DstNumElts);
    for (unsigned i = 0; i != DstNumElts; ++i) {
      Idxs[i] = i + (Imm * DstNumElts);
    }
    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    // If the intrinsic has a mask operand, handle that.
    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (!IsX86 && Name == "stackprotectorcheck") {
    Rep = nullptr;
  } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") ||
                       Name.startswith("avx512.mask.perm.di."))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    VectorType *VecTy = cast<VectorType>(CI->getType());
    unsigned NumElts = VecTy->getNumElements();

    SmallVector<uint32_t, 8> Idxs(NumElts);
    for (unsigned i = 0; i != NumElts; ++i)
      Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3);

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (IsX86 && (Name.startswith("avx.vperm2f128.") ||
                       Name == "avx2.vperm2i128")) {
    // The immediate permute control byte looks like this:
    // [1:0] - select 128 bits from sources for low half of destination
    // [2]   - ignore
    // [3]   - zero low half of destination
    // [5:4] - select 128 bits from sources for high half of destination
    // [6]   - ignore
    // [7]   - zero high half of destination

    uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();

    unsigned NumElts = CI->getType()->getVectorNumElements();
    unsigned HalfSize = NumElts / 2;
    SmallVector<uint32_t, 8> ShuffleMask(NumElts);

    // Determine which operand(s) are actually in use for this instruction.
    Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0);
    Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0);

    // If needed, replace operands based on zero mask.
    V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0;
    V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1;

    // Permute low half of result.
    unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i] = StartIndex + i;

    // Permute high half of result.
    StartIndex = (Imm & 0x10) ? HalfSize : 0;
    for (unsigned i = 0; i < HalfSize; ++i)
      ShuffleMask[i + HalfSize] = NumElts + StartIndex + i;

    Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask);

  } else if (IsX86 && (Name.startswith("avx.vpermil.") ||
                       Name == "sse2.pshuf.d" ||
                       Name.startswith("avx512.mask.vpermil.p") ||
                       Name.startswith("avx512.mask.pshuf.d."))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    VectorType *VecTy = cast<VectorType>(CI->getType());
    unsigned NumElts = VecTy->getNumElements();
    // Calculate the size of each index in the immediate.
    unsigned IdxSize = 64 / VecTy->getScalarSizeInBits();
    unsigned IdxMask = ((1 << IdxSize) - 1);

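    // e.g. sse2.pshuf.d with Imm = 0x1B (0b00011011) selects <3, 2, 1, 0>,
    // reversing a v4i32.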
    SmallVector<uint32_t, 8> Idxs(NumElts);
    // Look up the bits for this element, wrapping around the immediate every
    // 8 bits. Elements are grouped into sets of 2 or 4 elements so we need
    // to offset by the first index of each group.
    for (unsigned i = 0; i != NumElts; ++i)
      Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (IsX86 && (Name == "sse2.pshufl.w" ||
                       Name.startswith("avx512.mask.pshufl.w."))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    unsigned NumElts = CI->getType()->getVectorNumElements();

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned l = 0; l != NumElts; l += 8) {
      for (unsigned i = 0; i != 4; ++i)
        Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
      for (unsigned i = 4; i != 8; ++i)
        Idxs[i + l] = i + l;
    }

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (IsX86 && (Name == "sse2.pshufh.w" ||
                       Name.startswith("avx512.mask.pshufh.w."))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
    unsigned NumElts = CI->getType()->getVectorNumElements();

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned l = 0; l != NumElts; l += 8) {
      for (unsigned i = 0; i != 4; ++i)
        Idxs[i + l] = i + l;
      for (unsigned i = 0; i != 4; ++i)
        Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
    }

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    if (CI->getNumArgOperands() == 4)
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
    unsigned NumElts = CI->getType()->getVectorNumElements();

    unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
    unsigned HalfLaneElts = NumLaneElts / 2;

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned i = 0; i != NumElts; ++i) {
      // Base index is the starting element of the lane.
      Idxs[i] = i - (i % NumLaneElts);
      // If we are halfway through the lane, switch to the other source.
      if ((i % NumLaneElts) >= HalfLaneElts)
        Idxs[i] += NumElts;
      // Now select the specific element by adding HalfLaneElts bits from
      // the immediate, wrapping around the immediate every 8 bits.
      Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1);
    }
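    // e.g. avx512.mask.shuf.ps.128 with Imm = 0x1B yields <3, 2, 5, 4>:
    // elements 3 and 2 of the first source, then elements 1 and 0 of the
    // second.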

    Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);

    Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
                        CI->getArgOperand(3));
  } else if (IsX86 && (Name.startswith("avx512.mask.movddup") ||
                       Name.startswith("avx512.mask.movshdup") ||
                       Name.startswith("avx512.mask.movsldup"))) {
    Value *Op0 = CI->getArgOperand(0);
    unsigned NumElts = CI->getType()->getVectorNumElements();
    unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();

    unsigned Offset = 0;
    if (Name.startswith("avx512.mask.movshdup."))
      Offset = 1;

    SmallVector<uint32_t, 16> Idxs(NumElts);
    for (unsigned l = 0; l != NumElts; l += NumLaneElts)
      for (unsigned i = 0; i != NumLaneElts; i += 2) {
        Idxs[i + l + 0] = i + l + Offset;
        Idxs[i + l + 1] = i + l + Offset;
      }

    Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);

    Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                        CI->getArgOperand(1));
  } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") ||
                       Name.startswith("avx512.mask.unpckl."))) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    int NumElts = CI->getType()->getVectorNumElements();
    int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();

    SmallVector<uint32_t, 64> Idxs(NumElts);
    for (int l = 0; l != NumElts; l += NumLaneElts)
      for (int i = 0; i != NumLaneElts; ++i)
        Idxs[i + l] = l + (i / 2) + NumElts * (i % 2);

    Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);

    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") ||
                       Name.startswith("avx512.mask.unpckh."))) {
    Value *Op0 = CI->getArgOperand(0);
    Value *Op1 = CI->getArgOperand(1);
    int NumElts = CI->getType()->getVectorNumElements();
    int NumLaneElts = 128/CI->getType()->getScalarSizeInBits();

    SmallVector<uint32_t, 64> Idxs(NumElts);
    for (int l = 0; l != NumElts; l += NumLaneElts)
      for (int i = 0; i != NumLaneElts; ++i)
        Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2);

    Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);

    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.pand.")) {
    Rep = Builder.CreateAnd(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.pandn.")) {
    Rep = Builder.CreateAnd(Builder.CreateNot(CI->getArgOperand(0)),
                            CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.por.")) {
    Rep = Builder.CreateOr(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.pxor.")) {
    Rep = Builder.CreateXor(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.and.")) {
    VectorType *FTy = cast<VectorType>(CI->getType());
    VectorType *ITy = VectorType::getInteger(FTy);
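    // There is no floating-point and/or/xor in IR, so perform the bitwise op
    // on the same-width integer vector type and bitcast back.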
    Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                            Builder.CreateBitCast(CI->getArgOperand(1), ITy));
    Rep = Builder.CreateBitCast(Rep, FTy);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.andn.")) {
    VectorType *FTy = cast<VectorType>(CI->getType());
    VectorType *ITy = VectorType::getInteger(FTy);
    Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy));
    Rep = Builder.CreateAnd(Rep,
                            Builder.CreateBitCast(CI->getArgOperand(1), ITy));
    Rep = Builder.CreateBitCast(Rep, FTy);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.or.")) {
    VectorType *FTy = cast<VectorType>(CI->getType());
    VectorType *ITy = VectorType::getInteger(FTy);
    Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                           Builder.CreateBitCast(CI->getArgOperand(1), ITy));
    Rep = Builder.CreateBitCast(Rep, FTy);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.xor.")) {
    VectorType *FTy = cast<VectorType>(CI->getType());
    VectorType *ITy = VectorType::getInteger(FTy);
    Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy),
                            Builder.CreateBitCast(CI->getArgOperand(1), ITy));
    Rep = Builder.CreateBitCast(Rep, FTy);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.padd.")) {
    Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.psub.")) {
    Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) {
    Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.add.p")) {
    Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.div.p")) {
    Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) {
    Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) {
    Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1));
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) {
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
                                                       Intrinsic::ctlz,
                                                       CI->getType()),
                             { CI->getArgOperand(0), Builder.getInt1(false) });
    Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                        CI->getArgOperand(1));
  } else if (IsX86 && (Name.startswith("avx512.mask.max.p") ||
                       Name.startswith("avx512.mask.min.p"))) {
    bool IsMin = Name[13] == 'i';
    VectorType *VecTy = cast<VectorType>(CI->getType());
    unsigned VecWidth = VecTy->getPrimitiveSizeInBits();
    unsigned EltWidth = VecTy->getScalarSizeInBits();
    Intrinsic::ID IID;
    if (!IsMin && VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_max_ps;
    else if (!IsMin && VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_max_pd;
    else if (!IsMin && VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_max_ps_256;
    else if (!IsMin && VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_max_pd_256;
    else if (IsMin && VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_min_ps;
    else if (IsMin && VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_min_pd;
    else if (IsMin && VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_min_ps_256;
    else if (IsMin && VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_min_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                             { CI->getArgOperand(0), CI->getArgOperand(1) });
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.pshuf.b.")) {
    VectorType *VecTy = cast<VectorType>(CI->getType());
    Intrinsic::ID IID;
    if (VecTy->getPrimitiveSizeInBits() == 128)
      IID = Intrinsic::x86_ssse3_pshuf_b_128;
    else if (VecTy->getPrimitiveSizeInBits() == 256)
      IID = Intrinsic::x86_avx2_pshuf_b;
    else if (VecTy->getPrimitiveSizeInBits() == 512)
      IID = Intrinsic::x86_avx512_pshuf_b_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                             { CI->getArgOperand(0), CI->getArgOperand(1) });
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && (Name.startswith("avx512.mask.pmul.dq.") ||
                       Name.startswith("avx512.mask.pmulu.dq."))) {
    bool IsUnsigned = Name[16] == 'u';
    VectorType *VecTy = cast<VectorType>(CI->getType());
    Intrinsic::ID IID;
    if (!IsUnsigned && VecTy->getPrimitiveSizeInBits() == 128)
      IID = Intrinsic::x86_sse41_pmuldq;
    else if (!IsUnsigned && VecTy->getPrimitiveSizeInBits() == 256)
      IID = Intrinsic::x86_avx2_pmul_dq;
    else if (!IsUnsigned && VecTy->getPrimitiveSizeInBits() == 512)
      IID = Intrinsic::x86_avx512_pmul_dq_512;
    else if (IsUnsigned && VecTy->getPrimitiveSizeInBits() == 128)
      IID = Intrinsic::x86_sse2_pmulu_dq;
    else if (IsUnsigned && VecTy->getPrimitiveSizeInBits() == 256)
      IID = Intrinsic::x86_avx2_pmulu_dq;
    else if (IsUnsigned && VecTy->getPrimitiveSizeInBits() == 512)
      IID = Intrinsic::x86_avx512_pmulu_dq_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                             { CI->getArgOperand(0), CI->getArgOperand(1) });
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.pack")) {
    bool IsUnsigned = Name[16] == 'u';
    bool IsDW = Name[18] == 'd';
    VectorType *VecTy = cast<VectorType>(CI->getType());

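    // Name has the form avx512.mask.pack{ss,us}{wb,dw}.*, so Name[16] gives
    // the saturation signedness and Name[18] separates dword->word from
    // word->byte.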
    Intrinsic::ID IID;
    if (!IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 128)
      IID = Intrinsic::x86_sse2_packsswb_128;
    else if (!IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 256)
      IID = Intrinsic::x86_avx2_packsswb;
    else if (!IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 512)
      IID = Intrinsic::x86_avx512_packsswb_512;
    else if (!IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 128)
      IID = Intrinsic::x86_sse2_packssdw_128;
    else if (!IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 256)
      IID = Intrinsic::x86_avx2_packssdw;
    else if (!IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 512)
      IID = Intrinsic::x86_avx512_packssdw_512;
    else if (IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 128)
      IID = Intrinsic::x86_sse2_packuswb_128;
    else if (IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 256)
      IID = Intrinsic::x86_avx2_packuswb;
    else if (IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 512)
      IID = Intrinsic::x86_avx512_packuswb_512;
    else if (IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 128)
      IID = Intrinsic::x86_sse41_packusdw;
    else if (IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 256)
      IID = Intrinsic::x86_avx2_packusdw;
    else if (IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 512)
      IID = Intrinsic::x86_avx512_packusdw_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                             { CI->getArgOperand(0), CI->getArgOperand(1) });
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.startswith("avx512.mask.psll")) {
    bool IsImmediate = Name[16] == 'i' ||
                       (Name.size() > 18 && Name[18] == 'i');
    bool IsVariable = Name[16] == 'v';
    char Size = Name[16] == '.' ? Name[17] :
                Name[17] == '.' ? Name[18] :
                Name[18] == '.' ? Name[19] :
                                  Name[20];

    Intrinsic::ID IID;
    if (IsVariable && Name[17] != '.') {
      if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
        IID = Intrinsic::x86_avx2_psllv_q;
      else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
        IID = Intrinsic::x86_avx2_psllv_q_256;
      else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
        IID = Intrinsic::x86_avx2_psllv_d;
      else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
        IID = Intrinsic::x86_avx2_psllv_d_256;
      else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
        IID = Intrinsic::x86_avx512_psllv_w_128;
      else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
        IID = Intrinsic::x86_avx512_psllv_w_256;
      else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
        IID = Intrinsic::x86_avx512_psllv_w_512;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".128")) {
      if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
        IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
                          : Intrinsic::x86_sse2_psll_d;
      else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
                          : Intrinsic::x86_sse2_psll_q;
      else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
                          : Intrinsic::x86_sse2_psll_w;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".256")) {
      if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
        IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
                          : Intrinsic::x86_avx2_psll_d;
      else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
                          : Intrinsic::x86_avx2_psll_q;
      else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
                          : Intrinsic::x86_avx2_psll_w;
      else
        llvm_unreachable("Unexpected size");
    } else {
      if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
        IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
              IsVariable  ? Intrinsic::x86_avx512_psllv_d_512 :
                            Intrinsic::x86_avx512_psll_d_512;
      else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
        IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
              IsVariable  ? Intrinsic::x86_avx512_psllv_q_512 :
                            Intrinsic::x86_avx512_psll_q_512;
      else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
        IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
                          : Intrinsic::x86_avx512_psll_w_512;
      else
        llvm_unreachable("Unexpected size");
    }

    Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
  } else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
    bool IsImmediate = Name[16] == 'i' ||
                       (Name.size() > 18 && Name[18] == 'i');
    bool IsVariable = Name[16] == 'v';
    char Size = Name[16] == '.' ? Name[17] :
                Name[17] == '.' ? Name[18] :
                Name[18] == '.' ? Name[19] :
                                  Name[20];

    Intrinsic::ID IID;
    if (IsVariable && Name[17] != '.') {
      if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
        IID = Intrinsic::x86_avx2_psrlv_q;
      else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
        IID = Intrinsic::x86_avx2_psrlv_q_256;
      else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
        IID = Intrinsic::x86_avx2_psrlv_d;
      else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
        IID = Intrinsic::x86_avx2_psrlv_d_256;
      else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
        IID = Intrinsic::x86_avx512_psrlv_w_128;
      else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
        IID = Intrinsic::x86_avx512_psrlv_w_256;
      else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
        IID = Intrinsic::x86_avx512_psrlv_w_512;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".128")) {
      if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
                          : Intrinsic::x86_sse2_psrl_d;
      else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
                          : Intrinsic::x86_sse2_psrl_q;
      else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
                          : Intrinsic::x86_sse2_psrl_w;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".256")) {
      if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
                          : Intrinsic::x86_avx2_psrl_d;
      else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
                          : Intrinsic::x86_avx2_psrl_q;
      else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
                          : Intrinsic::x86_avx2_psrl_w;
      else
        llvm_unreachable("Unexpected size");
    } else {
      if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrlv.d.512
        IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
              IsVariable  ? Intrinsic::x86_avx512_psrlv_d_512 :
                            Intrinsic::x86_avx512_psrl_d_512;
      else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrlv.q.512
        IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
              IsVariable  ? Intrinsic::x86_avx512_psrlv_q_512 :
                            Intrinsic::x86_avx512_psrl_q_512;
      else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
        IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
                          : Intrinsic::x86_avx512_psrl_w_512;
      else
        llvm_unreachable("Unexpected size");
    }

    Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
  } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
    bool IsImmediate = Name[16] == 'i' ||
                       (Name.size() > 18 && Name[18] == 'i');
    bool IsVariable = Name[16] == 'v';
    char Size = Name[16] == '.' ? Name[17] :
                Name[17] == '.' ? Name[18] :
                Name[18] == '.' ? Name[19] :
                                  Name[20];

    Intrinsic::ID IID;
    if (IsVariable && Name[17] != '.') {
      if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
        IID = Intrinsic::x86_avx2_psrav_d;
      else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
        IID = Intrinsic::x86_avx2_psrav_d_256;
      else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
        IID = Intrinsic::x86_avx512_psrav_w_128;
      else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
        IID = Intrinsic::x86_avx512_psrav_w_256;
      else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
        IID = Intrinsic::x86_avx512_psrav_w_512;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".128")) {
      if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
                          : Intrinsic::x86_sse2_psra_d;
      else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
              IsVariable  ? Intrinsic::x86_avx512_psrav_q_128 :
                            Intrinsic::x86_avx512_psra_q_128;
      else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
                          : Intrinsic::x86_sse2_psra_w;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".256")) {
      if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
                          : Intrinsic::x86_avx2_psra_d;
      else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
              IsVariable  ? Intrinsic::x86_avx512_psrav_q_256 :
                            Intrinsic::x86_avx512_psra_q_256;
      else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
                          : Intrinsic::x86_avx2_psra_w;
      else
        llvm_unreachable("Unexpected size");
    } else {
      if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
              IsVariable  ? Intrinsic::x86_avx512_psrav_d_512 :
                            Intrinsic::x86_avx512_psra_d_512;
      else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
              IsVariable  ? Intrinsic::x86_avx512_psrav_q_512 :
                            Intrinsic::x86_avx512_psra_q_512;
      else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
                          : Intrinsic::x86_avx512_psra_w_512;
      else
        llvm_unreachable("Unexpected size");
    }

    Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
  } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
    Rep = upgradeMaskedMove(Builder, *CI);
  } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
    Rep = UpgradeMaskToInt(Builder, *CI);
  } else if (IsX86 && Name.startswith("avx512.mask.vpermilvar.")) {
    Intrinsic::ID IID;
    if (Name.endswith("ps.128"))
      IID = Intrinsic::x86_avx_vpermilvar_ps;
    else if (Name.endswith("pd.128"))
      IID = Intrinsic::x86_avx_vpermilvar_pd;
    else if (Name.endswith("ps.256"))
      IID = Intrinsic::x86_avx_vpermilvar_ps_256;
    else if (Name.endswith("pd.256"))
      IID = Intrinsic::x86_avx_vpermilvar_pd_256;
    else if (Name.endswith("ps.512"))
      IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
    else if (Name.endswith("pd.512"))
      IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
    else
      llvm_unreachable("Unexpected vpermilvar intrinsic");

    Function *Intrin = Intrinsic::getDeclaration(F->getParent(), IID);
    Rep = Builder.CreateCall(Intrin,
                             { CI->getArgOperand(0), CI->getArgOperand(1) });
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                        CI->getArgOperand(2));
  } else if (IsX86 && Name.endswith(".movntdqa")) {
    Module *M = F->getParent();
    MDNode *Node = MDNode::get(
        C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));

    Value *Ptr = CI->getArgOperand(0);
    VectorType *VTy = cast<VectorType>(CI->getType());

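    // Upgrade movntdqa to an ordinary aligned vector load that carries
    // !nontemporal metadata; the backend can still select a streaming load
    // from that hint.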
    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC =
        Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
    LoadInst *LI = Builder.CreateAlignedLoad(BC, VTy->getBitWidth() / 8);
    LI->setMetadata(M->getMDKindID("nontemporal"), Node);
    Rep = LI;
  } else if (IsX86 &&
             (Name.startswith("sse2.pavg") || Name.startswith("avx2.pavg") ||
              Name.startswith("avx512.mask.pavg"))) {
    // llvm.x86.sse2.pavg.b/w, llvm.x86.avx2.pavg.b/w,
    // llvm.x86.avx512.mask.pavg.b/w
    Value *A = CI->getArgOperand(0);
    Value *B = CI->getArgOperand(1);
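    // pavg computes the rounded average (A + B + 1) >> 1. Do the arithmetic
    // in a widened element type so the addition cannot overflow.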
    VectorType *ZextType = VectorType::getExtendedElementVectorType(
        cast<VectorType>(A->getType()));
    Value *ExtendedA = Builder.CreateZExt(A, ZextType);
    Value *ExtendedB = Builder.CreateZExt(B, ZextType);
    Value *Sum = Builder.CreateAdd(ExtendedA, ExtendedB);
    Value *AddOne = Builder.CreateAdd(Sum, ConstantInt::get(ZextType, 1));
    Value *ShiftR = Builder.CreateLShr(AddOne, ConstantInt::get(ZextType, 1));
    Rep = Builder.CreateTrunc(ShiftR, A->getType());
    if (CI->getNumArgOperands() > 2) {
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    }
  } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
    Value *Arg = CI->getArgOperand(0);
    Value *Neg = Builder.CreateNeg(Arg, "neg");
    Value *Cmp = Builder.CreateICmpSGE(
        Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
    Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
  } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
                        Name == "max.ui" || Name == "max.ull")) {
    Value *Arg0 = CI->getArgOperand(0);
    Value *Arg1 = CI->getArgOperand(1);
    Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
                     ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
                     : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
    Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
  } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
                        Name == "min.ui" || Name == "min.ull")) {
    Value *Arg0 = CI->getArgOperand(0);
    Value *Arg1 = CI->getArgOperand(1);
    Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
                     ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
                     : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
    Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
  } else if (IsNVVM && Name == "clz.ll") {
    // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
    Value *Arg = CI->getArgOperand(0);
    Value *Ctlz = Builder.CreateCall(
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                  {Arg->getType()}),
        {Arg, Builder.getFalse()}, "ctlz");
    Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
  } else if (IsNVVM && Name == "popc.ll") {
    // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an i64.
    Value *Arg = CI->getArgOperand(0);
    Value *Popc = Builder.CreateCall(
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                  {Arg->getType()}),
        Arg, "ctpop");
    Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
  } else if (IsNVVM && Name == "h2f") {
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(
                                 F->getParent(), Intrinsic::convert_from_fp16,
                                 {Builder.getFloatTy()}),
                             CI->getArgOperand(0), "h2f");
  } else {
    llvm_unreachable("Unknown function for CallInst upgrade.");
  }

  if (Rep)
    CI->replaceAllUsesWith(Rep);
  CI->eraseFromParent();
  return;
}

const auto &DefaultCase = [&NewFn, &CI]() -> void {
  // Handle generic mangling change, but nothing else
  assert(
      (CI->getCalledFunction()->getName() != NewFn->getName()) &&
      "Unknown function for CallInst upgrade and isn't just a name change");
  CI->setCalledFunction(NewFn);
};
CallInst *NewCall = nullptr;
switch (NewFn->getIntrinsicID()) {
default: {
  DefaultCase();
  return;
}

case Intrinsic::arm_neon_vld1:
case Intrinsic::arm_neon_vld2:
case Intrinsic::arm_neon_vld3:
case Intrinsic::arm_neon_vld4:
case Intrinsic::arm_neon_vld2lane:
case Intrinsic::arm_neon_vld3lane:
case Intrinsic::arm_neon_vld4lane:
case Intrinsic::arm_neon_vst1:
case Intrinsic::arm_neon_vst2:
case Intrinsic::arm_neon_vst3:
case Intrinsic::arm_neon_vst4:
case Intrinsic::arm_neon_vst2lane:
case Intrinsic::arm_neon_vst3lane:
case Intrinsic::arm_neon_vst4lane: {
  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                               CI->arg_operands().end());
  NewCall = Builder.CreateCall(NewFn, Args);
  break;
}

case Intrinsic::bitreverse:
  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
  break;

case Intrinsic::ctlz:
case Intrinsic::cttz:
  assert(CI->getNumArgOperands() == 1 &&
         "Mismatch between function args and call args");
  NewCall =
      Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
  break;

case Intrinsic::objectsize: {
  Value *NullIsUnknownSize = CI->getNumArgOperands() == 2
                                 ? Builder.getFalse()
                                 : CI->getArgOperand(2);
  NewCall = Builder.CreateCall(
      NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize});
  break;
}

case Intrinsic::ctpop:
  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
  break;

case Intrinsic::convert_from_fp16:
  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
  break;

case Intrinsic::dbg_value:
  // Upgrade from the old version that had an extra offset argument.
  assert(CI->getNumArgOperands() == 4);
  // Drop nonzero offsets instead of attempting to upgrade them.
  if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
    if (Offset->isZeroValue()) {
      NewCall = Builder.CreateCall(
          NewFn,
          {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
      break;
    }
  CI->eraseFromParent();
  return;

case Intrinsic::x86_xop_vfrcz_ss:
case Intrinsic::x86_xop_vfrcz_sd:
  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
  break;

case Intrinsic::x86_xop_vpermil2pd:
case Intrinsic::x86_xop_vpermil2ps:
case Intrinsic::x86_xop_vpermil2pd_256:
case Intrinsic::x86_xop_vpermil2ps_256: {
  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                               CI->arg_operands().end());
  VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
  VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
  Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
  NewCall = Builder.CreateCall(NewFn, Args);
  break;
}

case Intrinsic::x86_sse41_ptestc:
case Intrinsic::x86_sse41_ptestz:
case Intrinsic::x86_sse41_ptestnzc: {
  // The arguments for these intrinsics used to be v4f32, and changed
  // to v2i64. This is purely a nop, since those are bitwise intrinsics.
  // So, the only thing required is a bitcast for both arguments.
  // First, check the arguments have the old type.
  Value *Arg0 = CI->getArgOperand(0);
  if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
    return;

  // Old intrinsic, add bitcasts
  Value *Arg1 = CI->getArgOperand(1);

  Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);

  Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
  Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");

  NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
  break;
}

case Intrinsic::x86_sse41_insertps:
case Intrinsic::x86_sse41_dppd:
case Intrinsic::x86_sse41_dpps:
case Intrinsic::x86_sse41_mpsadbw:
case Intrinsic::x86_avx_dp_ps_256:
case Intrinsic::x86_avx2_mpsadbw: {
  // Need to truncate the last argument from i32 to i8 -- this argument
  // models an inherently 8-bit immediate operand to these x86 instructions.
  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                               CI->arg_operands().end());

  // Replace the last argument with a trunc.
  Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
  NewCall = Builder.CreateCall(NewFn, Args);
  break;
}

case Intrinsic::thread_pointer: {
  NewCall = Builder.CreateCall(NewFn, {});
  break;
}

case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::masked_load:
case Intrinsic::masked_store:
case Intrinsic::masked_gather:
case Intrinsic::masked_scatter: {
  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                               CI->arg_operands().end());
  NewCall = Builder.CreateCall(NewFn, Args);
  break;
}

case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset: {
  // We have to make sure that the call signature is what we're expecting.
  // We only want to change the old signatures by removing the alignment arg:
  //  @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i32, i1)
  //    -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i1)
  //  @llvm.memset...(i8*, i8, i[32|64], i32, i1)
  //    -> @llvm.memset...(i8*, i8, i[32|64], i1)
  // Note: i8*'s in the above can be any pointer type
  if (CI->getNumArgOperands() != 5) {
    DefaultCase();
    return;
  }
  // Remove alignment argument (3), and add alignment attributes to the
  // dest/src pointers.
  Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
                    CI->getArgOperand(2), CI->getArgOperand(4)};
  NewCall = Builder.CreateCall(NewFn, Args);
  auto *MemCI = cast<MemIntrinsic>(NewCall);
  // All mem intrinsics support dest alignment.
  const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
  MemCI->setDestAlignment(Align->getZExtValue());
  // Memcpy/Memmove also support source alignment.
  if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
    MTI->setSourceAlignment(Align->getZExtValue());
  break;
}
}
assert(NewCall && "Should have either set this variable or returned through "
                  "the default case");
std::string Name = CI->getName();
if (!Name.empty()) {
  CI->setName(Name + ".old");
  NewCall->setName(Name);
}
CI->replaceAllUsesWith(NewCall);
CI->eraseFromParent();
}

void llvm::UpgradeCallsToIntrinsic(Function *F) {
  assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");

  // Check if this function should be upgraded and get the replacement
  // function if there is one.
  Function *NewFn;
  if (UpgradeIntrinsicFunction(F, NewFn)) {
    // Replace all users of the old function with the new function or new
    // instructions. This is not a range loop because the call is deleted.
    for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; )
      if (CallInst *CI = dyn_cast<CallInst>(*UI++))
        UpgradeIntrinsicCall(CI, NewFn);

    // Remove old function, no longer used, from the module.
    F->eraseFromParent();
  }
}

MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
  // Check if the tag uses struct-path aware TBAA format.
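  // Struct-path tags have the base-type MDNode as operand 0 and at least
  // three operands; old scalar tags start with an MDString and must be
  // rewritten.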
  if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
    return &MD;

  auto &Context = MD.getContext();
  if (MD.getNumOperands() == 3) {
    Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
    MDNode *ScalarType = MDNode::get(Context, Elts);
    // Create a MDNode <ScalarType, ScalarType, offset 0, const>
    Metadata *Elts2[] = {ScalarType, ScalarType,
                         ConstantAsMetadata::get(
                             Constant::getNullValue(Type::getInt64Ty(Context))),
                         MD.getOperand(2)};
    return MDNode::get(Context, Elts2);
  }
  // Create a MDNode <MD, MD, offset 0>
  Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
                                    Type::getInt64Ty(Context)))};
  return MDNode::get(Context, Elts);
}

Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
                                      Instruction *&Temp) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Temp = nullptr;
  Type *SrcTy = V->getType();
  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
    LLVMContext &Context = V->getContext();

    // We have no information about target data layout, so we assume that
    // the maximum pointer size is 64 bits.
    Type *MidTy = Type::getInt64Ty(Context);
    Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);

    return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
  }

  return nullptr;
}

Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Type *SrcTy = C->getType();
  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
    LLVMContext &Context = C->getContext();

    // We have no information about target data layout, so we assume that
    // the maximum pointer size is 64 bits.
    Type *MidTy = Type::getInt64Ty(Context);

    return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
                                     DestTy);
  }

  return nullptr;
}

/// Check the debug info version number; if it is outdated, drop the debug
/// info. Return true if the module is modified.
bool llvm::UpgradeDebugInfo(Module &M) {
  unsigned Version = getDebugMetadataVersionFromModule(M);
  if (Version == DEBUG_METADATA_VERSION) {
    bool BrokenDebugInfo = false;
    if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
      report_fatal_error("Broken module found, compilation aborted!");
    if (!BrokenDebugInfo)
      // Everything is ok.
      return false;
    else {
      // Diagnose malformed debug info.
      DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
      M.getContext().diagnose(Diag);
    }
  }
  bool Modified = StripDebugInfo(M);
  if (Modified && Version != DEBUG_METADATA_VERSION) {
    // Diagnose a version mismatch.
    DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
    M.getContext().diagnose(DiagVersion);
  }
  return Modified;
}

bool llvm::UpgradeModuleFlags(Module &M) {
  NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
  if (!ModFlags)
    return false;

  bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
  for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
    MDNode *Op = ModFlags->getOperand(I);
    if (Op->getNumOperands() != 3)
      continue;
    MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
    if (!ID)
      continue;
    if (ID->getString() == "Objective-C Image Info Version")
      HasObjCFlag = true;
    if (ID->getString() == "Objective-C Class Properties")
      HasClassProperties = true;
    // Upgrade PIC/PIE Module Flags. The module flag behavior for these two
    // flags was Error and is now Max.
    if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
      if (auto *Behavior =
              mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
        if (Behavior->getLimitedValue() == Module::Error) {
          Type *Int32Ty = Type::getInt32Ty(M.getContext());
          Metadata *Ops[3] = {
              ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
              MDString::get(M.getContext(), ID->getString()),
              Op->getOperand(2)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
    // Upgrade Objective-C Image Info Section. Remove the whitespace in the
    // section name so that llvm-lto will not complain about mismatched
    // module flags that are functionally the same.
    if (ID->getString() == "Objective-C Image Info Section") {
      if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
        SmallVector<StringRef, 4> ValueComp;
        Value->getString().split(ValueComp, " ");
        if (ValueComp.size() != 1) {
          std::string NewValue;
          for (auto &S : ValueComp)
            NewValue += S.str();
          Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
                              MDString::get(M.getContext(), NewValue)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }

  // "Objective-C Class Properties" is recently added for Objective-C. We
  // upgrade ObjC bitcodes to contain an "Objective-C Class Properties" module
  // flag of value 0, so we can correctly downgrade this flag when trying to
  // link an ObjC bitcode without this module flag with an ObjC bitcode with
  // this module flag.
  if (HasObjCFlag && !HasClassProperties) {
    M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
                    (uint32_t)0);
    Changed = true;
  }

  return Changed;
}

void llvm::UpgradeSectionAttributes(Module &M) {
  auto TrimSpaces = [](StringRef Section) -> std::string {
    SmallVector<StringRef, 5> Components;
    Section.split(Components, ',');

    SmallString<32> Buffer;
    raw_svector_ostream OS(Buffer);

    for (auto Component : Components)
      OS << ',' << Component.trim();

    return OS.str().substr(1);
  };

  for (auto &GV : M.globals()) {
    if (!GV.hasSection())
      continue;

    StringRef Section = GV.getSection();

    if (!Section.startswith("__DATA, __objc_catlist"))
      continue;

    // __DATA, __objc_catlist, regular, no_dead_strip
    // __DATA,__objc_catlist,regular,no_dead_strip
    GV.setSection(TrimSpaces(Section));
  }
}

static bool isOldLoopArgument(Metadata *MD) {
  auto *T = dyn_cast_or_null<MDTuple>(MD);
  if (!T)
    return false;
  if (T->getNumOperands() < 1)
    return false;
  auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
  if (!S)
    return false;
  return S->getString().startswith("llvm.vectorizer.");
}

static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
  StringRef OldPrefix = "llvm.vectorizer.";
  assert(OldTag.startswith(OldPrefix) && "Expected old prefix");

  if (OldTag == "llvm.vectorizer.unroll")
    return MDString::get(C, "llvm.loop.interleave.count");

  return MDString::get(
      C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
             .str());
}

static Metadata *upgradeLoopArgument(Metadata *MD) {
  auto *T = dyn_cast_or_null<MDTuple>(MD);
  if (!T)
    return MD;
  if (T->getNumOperands() < 1)
    return MD;
  auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
  if (!OldTag)
    return MD;
  if (!OldTag->getString().startswith("llvm.vectorizer."))
    return MD;

  // This has an old tag. Upgrade it.
  SmallVector<Metadata *, 8> Ops;
  Ops.reserve(T->getNumOperands());
  Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
  for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
    Ops.push_back(T->getOperand(I));

  return MDTuple::get(T->getContext(), Ops);
}

MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
  auto *T = dyn_cast<MDTuple>(&N);
  if (!T)
    return &N;

  if (none_of(T->operands(), isOldLoopArgument))
    return &N;

  SmallVector<Metadata *, 8> Ops;
  Ops.reserve(T->getNumOperands());
  for (Metadata *MD : T->operands())
    Ops.push_back(upgradeLoopArgument(MD));

  return MDTuple::get(T->getContext(), Ops);
}