1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements the auto-upgrade helper functions. 11 // This is where deprecated IR intrinsics and other IR features are updated to 12 // current specifications. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #include "llvm/IR/AutoUpgrade.h" 17 #include "llvm/ADT/StringSwitch.h" 18 #include "llvm/IR/CFG.h" 19 #include "llvm/IR/CallSite.h" 20 #include "llvm/IR/Constants.h" 21 #include "llvm/IR/DIBuilder.h" 22 #include "llvm/IR/DebugInfo.h" 23 #include "llvm/IR/DiagnosticInfo.h" 24 #include "llvm/IR/Function.h" 25 #include "llvm/IR/IRBuilder.h" 26 #include "llvm/IR/Instruction.h" 27 #include "llvm/IR/IntrinsicInst.h" 28 #include "llvm/IR/LLVMContext.h" 29 #include "llvm/IR/Module.h" 30 #include "llvm/Support/ErrorHandling.h" 31 #include "llvm/Support/Regex.h" 32 #include <cstring> 33 using namespace llvm; 34 35 static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); } 36 37 // Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have 38 // changed their type from v4f32 to v2i64. 39 static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID, 40 Function *&NewFn) { 41 // Check whether this is an old version of the function, which received 42 // v4f32 arguments. 43 Type *Arg0Type = F->getFunctionType()->getParamType(0); 44 if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4)) 45 return false; 46 47 // Yes, it's old, replace it with new version. 48 rename(F); 49 NewFn = Intrinsic::getDeclaration(F->getParent(), IID); 50 return true; 51 } 52 53 // Upgrade the declarations of intrinsic functions whose 8-bit immediate mask 54 // arguments have changed their type from i32 to i8. 55 static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID, 56 Function *&NewFn) { 57 // Check that the last argument is an i32. 58 Type *LastArgType = F->getFunctionType()->getParamType( 59 F->getFunctionType()->getNumParams() - 1); 60 if (!LastArgType->isIntegerTy(32)) 61 return false; 62 63 // Move this function aside and map down. 64 rename(F); 65 NewFn = Intrinsic::getDeclaration(F->getParent(), IID); 66 return true; 67 } 68 69 static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) { 70 // All of the intrinsics matches below should be marked with which llvm 71 // version started autoupgrading them. At some point in the future we would 72 // like to use this information to remove upgrade code for some older 73 // intrinsics. It is currently undecided how we will determine that future 74 // point. 75 if (Name.startswith("sse2.pcmpeq.") || // Added in 3.1 76 Name.startswith("sse2.pcmpgt.") || // Added in 3.1 77 Name.startswith("avx2.pcmpeq.") || // Added in 3.1 78 Name.startswith("avx2.pcmpgt.") || // Added in 3.1 79 Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9 80 Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9 81 Name == "sse.add.ss" || // Added in 4.0 82 Name == "sse2.add.sd" || // Added in 4.0 83 Name == "sse.sub.ss" || // Added in 4.0 84 Name == "sse2.sub.sd" || // Added in 4.0 85 Name == "sse.mul.ss" || // Added in 4.0 86 Name == "sse2.mul.sd" || // Added in 4.0 87 Name == "sse.div.ss" || // Added in 4.0 88 Name == "sse2.div.sd" || // Added in 4.0 89 Name == "sse41.pmaxsb" || // Added in 3.9 90 Name == "sse2.pmaxs.w" || // Added in 3.9 91 Name == "sse41.pmaxsd" || // Added in 3.9 92 Name == "sse2.pmaxu.b" || // Added in 3.9 93 Name == "sse41.pmaxuw" || // Added in 3.9 94 Name == "sse41.pmaxud" || // Added in 3.9 95 Name == "sse41.pminsb" || // Added in 3.9 96 Name == "sse2.pmins.w" || // Added in 3.9 97 Name == "sse41.pminsd" || // Added in 3.9 98 Name == "sse2.pminu.b" || // Added in 3.9 99 Name == "sse41.pminuw" || // Added in 3.9 100 Name == "sse41.pminud" || // Added in 3.9 101 Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0 102 Name.startswith("avx2.pmax") || // Added in 3.9 103 Name.startswith("avx2.pmin") || // Added in 3.9 104 Name.startswith("avx512.mask.pmax") || // Added in 4.0 105 Name.startswith("avx512.mask.pmin") || // Added in 4.0 106 Name.startswith("avx2.vbroadcast") || // Added in 3.8 107 Name.startswith("avx2.pbroadcast") || // Added in 3.8 108 Name.startswith("avx.vpermil.") || // Added in 3.1 109 Name.startswith("sse2.pshuf") || // Added in 3.9 110 Name.startswith("avx512.pbroadcast") || // Added in 3.9 111 Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9 112 Name.startswith("avx512.mask.movddup") || // Added in 3.9 113 Name.startswith("avx512.mask.movshdup") || // Added in 3.9 114 Name.startswith("avx512.mask.movsldup") || // Added in 3.9 115 Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9 116 Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9 117 Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9 118 Name.startswith("avx512.mask.shuf.p") || // Added in 4.0 119 Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9 120 Name.startswith("avx512.mask.perm.df.") || // Added in 3.9 121 Name.startswith("avx512.mask.perm.di.") || // Added in 3.9 122 Name.startswith("avx512.mask.punpckl") || // Added in 3.9 123 Name.startswith("avx512.mask.punpckh") || // Added in 3.9 124 Name.startswith("avx512.mask.unpckl.") || // Added in 3.9 125 Name.startswith("avx512.mask.unpckh.") || // Added in 3.9 126 Name.startswith("avx512.mask.pand.") || // Added in 3.9 127 Name.startswith("avx512.mask.pandn.") || // Added in 3.9 128 Name.startswith("avx512.mask.por.") || // Added in 3.9 129 Name.startswith("avx512.mask.pxor.") || // Added in 3.9 130 Name.startswith("avx512.mask.and.") || // Added in 3.9 131 Name.startswith("avx512.mask.andn.") || // Added in 3.9 132 Name.startswith("avx512.mask.or.") || // Added in 3.9 133 Name.startswith("avx512.mask.xor.") || // Added in 3.9 134 Name.startswith("avx512.mask.padd.") || // Added in 4.0 135 Name.startswith("avx512.mask.psub.") || // Added in 4.0 136 Name.startswith("avx512.mask.pmull.") || // Added in 4.0 137 Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0 138 Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0 139 Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0 140 Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0 141 Name.startswith("avx512.mask.packsswb.") || // Added in 5.0 142 Name.startswith("avx512.mask.packssdw.") || // Added in 5.0 143 Name.startswith("avx512.mask.packuswb.") || // Added in 5.0 144 Name.startswith("avx512.mask.packusdw.") || // Added in 5.0 145 Name == "avx512.mask.add.pd.128" || // Added in 4.0 146 Name == "avx512.mask.add.pd.256" || // Added in 4.0 147 Name == "avx512.mask.add.ps.128" || // Added in 4.0 148 Name == "avx512.mask.add.ps.256" || // Added in 4.0 149 Name == "avx512.mask.div.pd.128" || // Added in 4.0 150 Name == "avx512.mask.div.pd.256" || // Added in 4.0 151 Name == "avx512.mask.div.ps.128" || // Added in 4.0 152 Name == "avx512.mask.div.ps.256" || // Added in 4.0 153 Name == "avx512.mask.mul.pd.128" || // Added in 4.0 154 Name == "avx512.mask.mul.pd.256" || // Added in 4.0 155 Name == "avx512.mask.mul.ps.128" || // Added in 4.0 156 Name == "avx512.mask.mul.ps.256" || // Added in 4.0 157 Name == "avx512.mask.sub.pd.128" || // Added in 4.0 158 Name == "avx512.mask.sub.pd.256" || // Added in 4.0 159 Name == "avx512.mask.sub.ps.128" || // Added in 4.0 160 Name == "avx512.mask.sub.ps.256" || // Added in 4.0 161 Name == "avx512.mask.max.pd.128" || // Added in 5.0 162 Name == "avx512.mask.max.pd.256" || // Added in 5.0 163 Name == "avx512.mask.max.ps.128" || // Added in 5.0 164 Name == "avx512.mask.max.ps.256" || // Added in 5.0 165 Name == "avx512.mask.min.pd.128" || // Added in 5.0 166 Name == "avx512.mask.min.pd.256" || // Added in 5.0 167 Name == "avx512.mask.min.ps.128" || // Added in 5.0 168 Name == "avx512.mask.min.ps.256" || // Added in 5.0 169 Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0 170 Name.startswith("avx512.mask.psll.d") || // Added in 4.0 171 Name.startswith("avx512.mask.psll.q") || // Added in 4.0 172 Name.startswith("avx512.mask.psll.w") || // Added in 4.0 173 Name.startswith("avx512.mask.psra.d") || // Added in 4.0 174 Name.startswith("avx512.mask.psra.q") || // Added in 4.0 175 Name.startswith("avx512.mask.psra.w") || // Added in 4.0 176 Name.startswith("avx512.mask.psrl.d") || // Added in 4.0 177 Name.startswith("avx512.mask.psrl.q") || // Added in 4.0 178 Name.startswith("avx512.mask.psrl.w") || // Added in 4.0 179 Name.startswith("avx512.mask.pslli") || // Added in 4.0 180 Name.startswith("avx512.mask.psrai") || // Added in 4.0 181 Name.startswith("avx512.mask.psrli") || // Added in 4.0 182 Name.startswith("avx512.mask.psllv") || // Added in 4.0 183 Name.startswith("avx512.mask.psrav") || // Added in 4.0 184 Name.startswith("avx512.mask.psrlv") || // Added in 4.0 185 Name.startswith("sse41.pmovsx") || // Added in 3.8 186 Name.startswith("sse41.pmovzx") || // Added in 3.9 187 Name.startswith("avx2.pmovsx") || // Added in 3.9 188 Name.startswith("avx2.pmovzx") || // Added in 3.9 189 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0 190 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0 191 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0 192 Name == "sse2.cvtdq2pd" || // Added in 3.9 193 Name == "sse2.cvtps2pd" || // Added in 3.9 194 Name == "avx.cvtdq2.pd.256" || // Added in 3.9 195 Name == "avx.cvt.ps2.pd.256" || // Added in 3.9 196 Name.startswith("avx.vinsertf128.") || // Added in 3.7 197 Name == "avx2.vinserti128" || // Added in 3.7 198 Name.startswith("avx512.mask.insert") || // Added in 4.0 199 Name.startswith("avx.vextractf128.") || // Added in 3.7 200 Name == "avx2.vextracti128" || // Added in 3.7 201 Name.startswith("avx512.mask.vextract") || // Added in 4.0 202 Name.startswith("sse4a.movnt.") || // Added in 3.9 203 Name.startswith("avx.movnt.") || // Added in 3.2 204 Name.startswith("avx512.storent.") || // Added in 3.9 205 Name == "sse2.storel.dq" || // Added in 3.9 206 Name.startswith("sse.storeu.") || // Added in 3.9 207 Name.startswith("sse2.storeu.") || // Added in 3.9 208 Name.startswith("avx.storeu.") || // Added in 3.9 209 Name.startswith("avx512.mask.storeu.") || // Added in 3.9 210 Name.startswith("avx512.mask.store.p") || // Added in 3.9 211 Name.startswith("avx512.mask.store.b.") || // Added in 3.9 212 Name.startswith("avx512.mask.store.w.") || // Added in 3.9 213 Name.startswith("avx512.mask.store.d.") || // Added in 3.9 214 Name.startswith("avx512.mask.store.q.") || // Added in 3.9 215 Name.startswith("avx512.mask.loadu.") || // Added in 3.9 216 Name.startswith("avx512.mask.load.") || // Added in 3.9 217 Name == "sse42.crc32.64.8" || // Added in 3.4 218 Name.startswith("avx.vbroadcast.s") || // Added in 3.5 219 Name.startswith("avx512.mask.palignr.") || // Added in 3.9 220 Name.startswith("avx512.mask.valign.") || // Added in 4.0 221 Name.startswith("sse2.psll.dq") || // Added in 3.7 222 Name.startswith("sse2.psrl.dq") || // Added in 3.7 223 Name.startswith("avx2.psll.dq") || // Added in 3.7 224 Name.startswith("avx2.psrl.dq") || // Added in 3.7 225 Name.startswith("avx512.psll.dq") || // Added in 3.9 226 Name.startswith("avx512.psrl.dq") || // Added in 3.9 227 Name == "sse41.pblendw" || // Added in 3.7 228 Name.startswith("sse41.blendp") || // Added in 3.7 229 Name.startswith("avx.blend.p") || // Added in 3.7 230 Name == "avx2.pblendw" || // Added in 3.7 231 Name.startswith("avx2.pblendd.") || // Added in 3.7 232 Name.startswith("avx.vbroadcastf128") || // Added in 4.0 233 Name == "avx2.vbroadcasti128" || // Added in 3.7 234 Name == "xop.vpcmov" || // Added in 3.8 235 Name == "xop.vpcmov.256" || // Added in 5.0 236 Name.startswith("avx512.mask.move.s") || // Added in 4.0 237 Name.startswith("avx512.cvtmask2") || // Added in 5.0 238 (Name.startswith("xop.vpcom") && // Added in 3.2 239 F->arg_size() == 2)) 240 return true; 241 242 return false; 243 } 244 245 static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name, 246 Function *&NewFn) { 247 // Only handle intrinsics that start with "x86.". 248 if (!Name.startswith("x86.")) 249 return false; 250 // Remove "x86." prefix. 251 Name = Name.substr(4); 252 253 if (ShouldUpgradeX86Intrinsic(F, Name)) { 254 NewFn = nullptr; 255 return true; 256 } 257 258 // SSE4.1 ptest functions may have an old signature. 259 if (Name.startswith("sse41.ptest")) { // Added in 3.2 260 if (Name.substr(11) == "c") 261 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn); 262 if (Name.substr(11) == "z") 263 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn); 264 if (Name.substr(11) == "nzc") 265 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn); 266 } 267 // Several blend and other instructions with masks used the wrong number of 268 // bits. 269 if (Name == "sse41.insertps") // Added in 3.6 270 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps, 271 NewFn); 272 if (Name == "sse41.dppd") // Added in 3.6 273 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd, 274 NewFn); 275 if (Name == "sse41.dpps") // Added in 3.6 276 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps, 277 NewFn); 278 if (Name == "sse41.mpsadbw") // Added in 3.6 279 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw, 280 NewFn); 281 if (Name == "avx.dp.ps.256") // Added in 3.6 282 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256, 283 NewFn); 284 if (Name == "avx2.mpsadbw") // Added in 3.6 285 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw, 286 NewFn); 287 288 // frcz.ss/sd may need to have an argument dropped. Added in 3.2 289 if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) { 290 rename(F); 291 NewFn = Intrinsic::getDeclaration(F->getParent(), 292 Intrinsic::x86_xop_vfrcz_ss); 293 return true; 294 } 295 if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) { 296 rename(F); 297 NewFn = Intrinsic::getDeclaration(F->getParent(), 298 Intrinsic::x86_xop_vfrcz_sd); 299 return true; 300 } 301 // Upgrade any XOP PERMIL2 index operand still using a float/double vector. 302 if (Name.startswith("xop.vpermil2")) { // Added in 3.9 303 auto Idx = F->getFunctionType()->getParamType(2); 304 if (Idx->isFPOrFPVectorTy()) { 305 rename(F); 306 unsigned IdxSize = Idx->getPrimitiveSizeInBits(); 307 unsigned EltSize = Idx->getScalarSizeInBits(); 308 Intrinsic::ID Permil2ID; 309 if (EltSize == 64 && IdxSize == 128) 310 Permil2ID = Intrinsic::x86_xop_vpermil2pd; 311 else if (EltSize == 32 && IdxSize == 128) 312 Permil2ID = Intrinsic::x86_xop_vpermil2ps; 313 else if (EltSize == 64 && IdxSize == 256) 314 Permil2ID = Intrinsic::x86_xop_vpermil2pd_256; 315 else 316 Permil2ID = Intrinsic::x86_xop_vpermil2ps_256; 317 NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID); 318 return true; 319 } 320 } 321 322 return false; 323 } 324 325 static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) { 326 assert(F && "Illegal to upgrade a non-existent Function."); 327 328 // Quickly eliminate it, if it's not a candidate. 329 StringRef Name = F->getName(); 330 if (Name.size() <= 8 || !Name.startswith("llvm.")) 331 return false; 332 Name = Name.substr(5); // Strip off "llvm." 333 334 switch (Name[0]) { 335 default: break; 336 case 'a': { 337 if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) { 338 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse, 339 F->arg_begin()->getType()); 340 return true; 341 } 342 if (Name.startswith("arm.neon.vclz")) { 343 Type* args[2] = { 344 F->arg_begin()->getType(), 345 Type::getInt1Ty(F->getContext()) 346 }; 347 // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to 348 // the end of the name. Change name from llvm.arm.neon.vclz.* to 349 // llvm.ctlz.* 350 FunctionType* fType = FunctionType::get(F->getReturnType(), args, false); 351 NewFn = Function::Create(fType, F->getLinkage(), 352 "llvm.ctlz." + Name.substr(14), F->getParent()); 353 return true; 354 } 355 if (Name.startswith("arm.neon.vcnt")) { 356 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop, 357 F->arg_begin()->getType()); 358 return true; 359 } 360 Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$"); 361 if (vldRegex.match(Name)) { 362 auto fArgs = F->getFunctionType()->params(); 363 SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end()); 364 // Can't use Intrinsic::getDeclaration here as the return types might 365 // then only be structurally equal. 366 FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false); 367 NewFn = Function::Create(fType, F->getLinkage(), 368 "llvm." + Name + ".p0i8", F->getParent()); 369 return true; 370 } 371 Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$"); 372 if (vstRegex.match(Name)) { 373 static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1, 374 Intrinsic::arm_neon_vst2, 375 Intrinsic::arm_neon_vst3, 376 Intrinsic::arm_neon_vst4}; 377 378 static const Intrinsic::ID StoreLaneInts[] = { 379 Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane, 380 Intrinsic::arm_neon_vst4lane 381 }; 382 383 auto fArgs = F->getFunctionType()->params(); 384 Type *Tys[] = {fArgs[0], fArgs[1]}; 385 if (Name.find("lane") == StringRef::npos) 386 NewFn = Intrinsic::getDeclaration(F->getParent(), 387 StoreInts[fArgs.size() - 3], Tys); 388 else 389 NewFn = Intrinsic::getDeclaration(F->getParent(), 390 StoreLaneInts[fArgs.size() - 5], Tys); 391 return true; 392 } 393 if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") { 394 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer); 395 return true; 396 } 397 break; 398 } 399 400 case 'c': { 401 if (Name.startswith("ctlz.") && F->arg_size() == 1) { 402 rename(F); 403 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, 404 F->arg_begin()->getType()); 405 return true; 406 } 407 if (Name.startswith("cttz.") && F->arg_size() == 1) { 408 rename(F); 409 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz, 410 F->arg_begin()->getType()); 411 return true; 412 } 413 break; 414 } 415 case 'i': 416 case 'l': { 417 bool IsLifetimeStart = Name.startswith("lifetime.start"); 418 if (IsLifetimeStart || Name.startswith("invariant.start")) { 419 Intrinsic::ID ID = IsLifetimeStart ? 420 Intrinsic::lifetime_start : Intrinsic::invariant_start; 421 auto Args = F->getFunctionType()->params(); 422 Type* ObjectPtr[1] = {Args[1]}; 423 if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) { 424 rename(F); 425 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr); 426 return true; 427 } 428 } 429 430 bool IsLifetimeEnd = Name.startswith("lifetime.end"); 431 if (IsLifetimeEnd || Name.startswith("invariant.end")) { 432 Intrinsic::ID ID = IsLifetimeEnd ? 433 Intrinsic::lifetime_end : Intrinsic::invariant_end; 434 435 auto Args = F->getFunctionType()->params(); 436 Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]}; 437 if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) { 438 rename(F); 439 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr); 440 return true; 441 } 442 } 443 break; 444 } 445 case 'm': { 446 if (Name.startswith("masked.load.")) { 447 Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() }; 448 if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) { 449 rename(F); 450 NewFn = Intrinsic::getDeclaration(F->getParent(), 451 Intrinsic::masked_load, 452 Tys); 453 return true; 454 } 455 } 456 if (Name.startswith("masked.store.")) { 457 auto Args = F->getFunctionType()->params(); 458 Type *Tys[] = { Args[0], Args[1] }; 459 if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) { 460 rename(F); 461 NewFn = Intrinsic::getDeclaration(F->getParent(), 462 Intrinsic::masked_store, 463 Tys); 464 return true; 465 } 466 } 467 break; 468 } 469 case 'n': { 470 if (Name.startswith("nvvm.")) { 471 Name = Name.substr(5); 472 473 // The following nvvm intrinsics correspond exactly to an LLVM intrinsic. 474 Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name) 475 .Cases("brev32", "brev64", Intrinsic::bitreverse) 476 .Case("clz.i", Intrinsic::ctlz) 477 .Case("popc.i", Intrinsic::ctpop) 478 .Default(Intrinsic::not_intrinsic); 479 if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) { 480 NewFn = Intrinsic::getDeclaration(F->getParent(), IID, 481 {F->getReturnType()}); 482 return true; 483 } 484 485 // The following nvvm intrinsics correspond exactly to an LLVM idiom, but 486 // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall. 487 // 488 // TODO: We could add lohi.i2d. 489 bool Expand = StringSwitch<bool>(Name) 490 .Cases("abs.i", "abs.ll", true) 491 .Cases("clz.ll", "popc.ll", "h2f", true) 492 .Cases("max.i", "max.ll", "max.ui", "max.ull", true) 493 .Cases("min.i", "min.ll", "min.ui", "min.ull", true) 494 .Default(false); 495 if (Expand) { 496 NewFn = nullptr; 497 return true; 498 } 499 } 500 } 501 case 'o': 502 // We only need to change the name to match the mangling including the 503 // address space. 504 if (Name.startswith("objectsize.")) { 505 Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() }; 506 if (F->arg_size() == 2 || 507 F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) { 508 rename(F); 509 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize, 510 Tys); 511 return true; 512 } 513 } 514 break; 515 516 case 's': 517 if (Name == "stackprotectorcheck") { 518 NewFn = nullptr; 519 return true; 520 } 521 break; 522 523 case 'x': 524 if (UpgradeX86IntrinsicFunction(F, Name, NewFn)) 525 return true; 526 } 527 // Remangle our intrinsic since we upgrade the mangling 528 auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F); 529 if (Result != None) { 530 NewFn = Result.getValue(); 531 return true; 532 } 533 534 // This may not belong here. This function is effectively being overloaded 535 // to both detect an intrinsic which needs upgrading, and to provide the 536 // upgraded form of the intrinsic. We should perhaps have two separate 537 // functions for this. 538 return false; 539 } 540 541 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) { 542 NewFn = nullptr; 543 bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn); 544 assert(F != NewFn && "Intrinsic function upgraded to the same function"); 545 546 // Upgrade intrinsic attributes. This does not change the function. 547 if (NewFn) 548 F = NewFn; 549 if (Intrinsic::ID id = F->getIntrinsicID()) 550 F->setAttributes(Intrinsic::getAttributes(F->getContext(), id)); 551 return Upgraded; 552 } 553 554 bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) { 555 // Nothing to do yet. 556 return false; 557 } 558 559 // Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them 560 // to byte shuffles. 561 static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder, 562 Value *Op, unsigned Shift) { 563 Type *ResultTy = Op->getType(); 564 unsigned NumElts = ResultTy->getVectorNumElements() * 8; 565 566 // Bitcast from a 64-bit element type to a byte element type. 567 Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts); 568 Op = Builder.CreateBitCast(Op, VecTy, "cast"); 569 570 // We'll be shuffling in zeroes. 571 Value *Res = Constant::getNullValue(VecTy); 572 573 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise, 574 // we'll just return the zero vector. 575 if (Shift < 16) { 576 uint32_t Idxs[64]; 577 // 256/512-bit version is split into 2/4 16-byte lanes. 578 for (unsigned l = 0; l != NumElts; l += 16) 579 for (unsigned i = 0; i != 16; ++i) { 580 unsigned Idx = NumElts + i - Shift; 581 if (Idx < NumElts) 582 Idx -= NumElts - 16; // end of lane, switch operand. 583 Idxs[l + i] = Idx + l; 584 } 585 586 Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts)); 587 } 588 589 // Bitcast back to a 64-bit element type. 590 return Builder.CreateBitCast(Res, ResultTy, "cast"); 591 } 592 593 // Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them 594 // to byte shuffles. 595 static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op, 596 unsigned Shift) { 597 Type *ResultTy = Op->getType(); 598 unsigned NumElts = ResultTy->getVectorNumElements() * 8; 599 600 // Bitcast from a 64-bit element type to a byte element type. 601 Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts); 602 Op = Builder.CreateBitCast(Op, VecTy, "cast"); 603 604 // We'll be shuffling in zeroes. 605 Value *Res = Constant::getNullValue(VecTy); 606 607 // If shift is less than 16, emit a shuffle to move the bytes. Otherwise, 608 // we'll just return the zero vector. 609 if (Shift < 16) { 610 uint32_t Idxs[64]; 611 // 256/512-bit version is split into 2/4 16-byte lanes. 612 for (unsigned l = 0; l != NumElts; l += 16) 613 for (unsigned i = 0; i != 16; ++i) { 614 unsigned Idx = i + Shift; 615 if (Idx >= 16) 616 Idx += NumElts - 16; // end of lane, switch operand. 617 Idxs[l + i] = Idx + l; 618 } 619 620 Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts)); 621 } 622 623 // Bitcast back to a 64-bit element type. 624 return Builder.CreateBitCast(Res, ResultTy, "cast"); 625 } 626 627 static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask, 628 unsigned NumElts) { 629 llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(), 630 cast<IntegerType>(Mask->getType())->getBitWidth()); 631 Mask = Builder.CreateBitCast(Mask, MaskTy); 632 633 // If we have less than 8 elements, then the starting mask was an i8 and 634 // we need to extract down to the right number of elements. 635 if (NumElts < 8) { 636 uint32_t Indices[4]; 637 for (unsigned i = 0; i != NumElts; ++i) 638 Indices[i] = i; 639 Mask = Builder.CreateShuffleVector(Mask, Mask, 640 makeArrayRef(Indices, NumElts), 641 "extract"); 642 } 643 644 return Mask; 645 } 646 647 static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask, 648 Value *Op0, Value *Op1) { 649 // If the mask is all ones just emit the align operation. 650 if (const auto *C = dyn_cast<Constant>(Mask)) 651 if (C->isAllOnesValue()) 652 return Op0; 653 654 Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements()); 655 return Builder.CreateSelect(Mask, Op0, Op1); 656 } 657 658 // Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics. 659 // PALIGNR handles large immediates by shifting while VALIGN masks the immediate 660 // so we need to handle both cases. VALIGN also doesn't have 128-bit lanes. 661 static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0, 662 Value *Op1, Value *Shift, 663 Value *Passthru, Value *Mask, 664 bool IsVALIGN) { 665 unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue(); 666 667 unsigned NumElts = Op0->getType()->getVectorNumElements(); 668 assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!"); 669 assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!"); 670 assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!"); 671 672 // Mask the immediate for VALIGN. 673 if (IsVALIGN) 674 ShiftVal &= (NumElts - 1); 675 676 // If palignr is shifting the pair of vectors more than the size of two 677 // lanes, emit zero. 678 if (ShiftVal >= 32) 679 return llvm::Constant::getNullValue(Op0->getType()); 680 681 // If palignr is shifting the pair of input vectors more than one lane, 682 // but less than two lanes, convert to shifting in zeroes. 683 if (ShiftVal > 16) { 684 ShiftVal -= 16; 685 Op1 = Op0; 686 Op0 = llvm::Constant::getNullValue(Op0->getType()); 687 } 688 689 uint32_t Indices[64]; 690 // 256-bit palignr operates on 128-bit lanes so we need to handle that 691 for (unsigned l = 0; l < NumElts; l += 16) { 692 for (unsigned i = 0; i != 16; ++i) { 693 unsigned Idx = ShiftVal + i; 694 if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN. 695 Idx += NumElts - 16; // End of lane, switch operand. 696 Indices[l + i] = Idx + l; 697 } 698 } 699 700 Value *Align = Builder.CreateShuffleVector(Op1, Op0, 701 makeArrayRef(Indices, NumElts), 702 "palignr"); 703 704 return EmitX86Select(Builder, Mask, Align, Passthru); 705 } 706 707 static Value *UpgradeMaskedStore(IRBuilder<> &Builder, 708 Value *Ptr, Value *Data, Value *Mask, 709 bool Aligned) { 710 // Cast the pointer to the right type. 711 Ptr = Builder.CreateBitCast(Ptr, 712 llvm::PointerType::getUnqual(Data->getType())); 713 unsigned Align = 714 Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1; 715 716 // If the mask is all ones just emit a regular store. 717 if (const auto *C = dyn_cast<Constant>(Mask)) 718 if (C->isAllOnesValue()) 719 return Builder.CreateAlignedStore(Data, Ptr, Align); 720 721 // Convert the mask from an integer type to a vector of i1. 722 unsigned NumElts = Data->getType()->getVectorNumElements(); 723 Mask = getX86MaskVec(Builder, Mask, NumElts); 724 return Builder.CreateMaskedStore(Data, Ptr, Align, Mask); 725 } 726 727 static Value *UpgradeMaskedLoad(IRBuilder<> &Builder, 728 Value *Ptr, Value *Passthru, Value *Mask, 729 bool Aligned) { 730 // Cast the pointer to the right type. 731 Ptr = Builder.CreateBitCast(Ptr, 732 llvm::PointerType::getUnqual(Passthru->getType())); 733 unsigned Align = 734 Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1; 735 736 // If the mask is all ones just emit a regular store. 737 if (const auto *C = dyn_cast<Constant>(Mask)) 738 if (C->isAllOnesValue()) 739 return Builder.CreateAlignedLoad(Ptr, Align); 740 741 // Convert the mask from an integer type to a vector of i1. 742 unsigned NumElts = Passthru->getType()->getVectorNumElements(); 743 Mask = getX86MaskVec(Builder, Mask, NumElts); 744 return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru); 745 } 746 747 static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI, 748 ICmpInst::Predicate Pred) { 749 Value *Op0 = CI.getArgOperand(0); 750 Value *Op1 = CI.getArgOperand(1); 751 Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1); 752 Value *Res = Builder.CreateSelect(Cmp, Op0, Op1); 753 754 if (CI.getNumArgOperands() == 4) 755 Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2)); 756 757 return Res; 758 } 759 760 static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI, 761 ICmpInst::Predicate Pred) { 762 Value *Op0 = CI.getArgOperand(0); 763 unsigned NumElts = Op0->getType()->getVectorNumElements(); 764 Value *Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1)); 765 766 Value *Mask = CI.getArgOperand(2); 767 const auto *C = dyn_cast<Constant>(Mask); 768 if (!C || !C->isAllOnesValue()) 769 Cmp = Builder.CreateAnd(Cmp, getX86MaskVec(Builder, Mask, NumElts)); 770 771 if (NumElts < 8) { 772 uint32_t Indices[8]; 773 for (unsigned i = 0; i != NumElts; ++i) 774 Indices[i] = i; 775 for (unsigned i = NumElts; i != 8; ++i) 776 Indices[i] = NumElts + i % NumElts; 777 Cmp = Builder.CreateShuffleVector(Cmp, 778 Constant::getNullValue(Cmp->getType()), 779 Indices); 780 } 781 return Builder.CreateBitCast(Cmp, IntegerType::get(CI.getContext(), 782 std::max(NumElts, 8U))); 783 } 784 785 // Replace a masked intrinsic with an older unmasked intrinsic. 786 static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI, 787 Intrinsic::ID IID) { 788 Function *F = CI.getCalledFunction(); 789 Function *Intrin = Intrinsic::getDeclaration(F->getParent(), IID); 790 Value *Rep = Builder.CreateCall(Intrin, 791 { CI.getArgOperand(0), CI.getArgOperand(1) }); 792 return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2)); 793 } 794 795 static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) { 796 Value* A = CI.getArgOperand(0); 797 Value* B = CI.getArgOperand(1); 798 Value* Src = CI.getArgOperand(2); 799 Value* Mask = CI.getArgOperand(3); 800 801 Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1)); 802 Value* Cmp = Builder.CreateIsNotNull(AndNode); 803 Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0); 804 Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0); 805 Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2); 806 return Builder.CreateInsertElement(A, Select, (uint64_t)0); 807 } 808 809 810 static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) { 811 Value* Op = CI.getArgOperand(0); 812 Type* ReturnOp = CI.getType(); 813 unsigned NumElts = CI.getType()->getVectorNumElements(); 814 Value *Mask = getX86MaskVec(Builder, Op, NumElts); 815 return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2"); 816 } 817 818 /// Upgrade a call to an old intrinsic. All argument and return casting must be 819 /// provided to seamlessly integrate with existing context. 820 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { 821 Function *F = CI->getCalledFunction(); 822 LLVMContext &C = CI->getContext(); 823 IRBuilder<> Builder(C); 824 Builder.SetInsertPoint(CI->getParent(), CI->getIterator()); 825 826 assert(F && "Intrinsic call is not direct?"); 827 828 if (!NewFn) { 829 // Get the Function's name. 830 StringRef Name = F->getName(); 831 832 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'"); 833 Name = Name.substr(5); 834 835 bool IsX86 = Name.startswith("x86."); 836 if (IsX86) 837 Name = Name.substr(4); 838 bool IsNVVM = Name.startswith("nvvm."); 839 if (IsNVVM) 840 Name = Name.substr(5); 841 842 if (IsX86 && Name.startswith("sse4a.movnt.")) { 843 Module *M = F->getParent(); 844 SmallVector<Metadata *, 1> Elts; 845 Elts.push_back( 846 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 847 MDNode *Node = MDNode::get(C, Elts); 848 849 Value *Arg0 = CI->getArgOperand(0); 850 Value *Arg1 = CI->getArgOperand(1); 851 852 // Nontemporal (unaligned) store of the 0'th element of the float/double 853 // vector. 854 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType(); 855 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy); 856 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast"); 857 Value *Extract = 858 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement"); 859 860 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1); 861 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 862 863 // Remove intrinsic. 864 CI->eraseFromParent(); 865 return; 866 } 867 868 if (IsX86 && (Name.startswith("avx.movnt.") || 869 Name.startswith("avx512.storent."))) { 870 Module *M = F->getParent(); 871 SmallVector<Metadata *, 1> Elts; 872 Elts.push_back( 873 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 874 MDNode *Node = MDNode::get(C, Elts); 875 876 Value *Arg0 = CI->getArgOperand(0); 877 Value *Arg1 = CI->getArgOperand(1); 878 879 // Convert the type of the pointer to a pointer to the stored type. 880 Value *BC = Builder.CreateBitCast(Arg0, 881 PointerType::getUnqual(Arg1->getType()), 882 "cast"); 883 VectorType *VTy = cast<VectorType>(Arg1->getType()); 884 StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, 885 VTy->getBitWidth() / 8); 886 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 887 888 // Remove intrinsic. 889 CI->eraseFromParent(); 890 return; 891 } 892 893 if (IsX86 && Name == "sse2.storel.dq") { 894 Value *Arg0 = CI->getArgOperand(0); 895 Value *Arg1 = CI->getArgOperand(1); 896 897 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 898 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 899 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0); 900 Value *BC = Builder.CreateBitCast(Arg0, 901 PointerType::getUnqual(Elt->getType()), 902 "cast"); 903 Builder.CreateAlignedStore(Elt, BC, 1); 904 905 // Remove intrinsic. 906 CI->eraseFromParent(); 907 return; 908 } 909 910 if (IsX86 && (Name.startswith("sse.storeu.") || 911 Name.startswith("sse2.storeu.") || 912 Name.startswith("avx.storeu."))) { 913 Value *Arg0 = CI->getArgOperand(0); 914 Value *Arg1 = CI->getArgOperand(1); 915 916 Arg0 = Builder.CreateBitCast(Arg0, 917 PointerType::getUnqual(Arg1->getType()), 918 "cast"); 919 Builder.CreateAlignedStore(Arg1, Arg0, 1); 920 921 // Remove intrinsic. 922 CI->eraseFromParent(); 923 return; 924 } 925 926 if (IsX86 && (Name.startswith("avx512.mask.store"))) { 927 // "avx512.mask.storeu." or "avx512.mask.store." 928 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu". 929 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1), 930 CI->getArgOperand(2), Aligned); 931 932 // Remove intrinsic. 933 CI->eraseFromParent(); 934 return; 935 } 936 937 Value *Rep; 938 // Upgrade packed integer vector compare intrinsics to compare instructions. 939 if (IsX86 && (Name.startswith("sse2.pcmp") || 940 Name.startswith("avx2.pcmp"))) { 941 // "sse2.pcpmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt." 942 bool CmpEq = Name[9] == 'e'; 943 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT, 944 CI->getArgOperand(0), CI->getArgOperand(1)); 945 Rep = Builder.CreateSExt(Rep, CI->getType(), ""); 946 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd")) { 947 Type *I32Ty = Type::getInt32Ty(C); 948 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 949 ConstantInt::get(I32Ty, 0)); 950 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 951 ConstantInt::get(I32Ty, 0)); 952 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), 953 Builder.CreateFAdd(Elt0, Elt1), 954 ConstantInt::get(I32Ty, 0)); 955 } else if (IsX86 && (Name == "sse.sub.ss" || Name == "sse2.sub.sd")) { 956 Type *I32Ty = Type::getInt32Ty(C); 957 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 958 ConstantInt::get(I32Ty, 0)); 959 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 960 ConstantInt::get(I32Ty, 0)); 961 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), 962 Builder.CreateFSub(Elt0, Elt1), 963 ConstantInt::get(I32Ty, 0)); 964 } else if (IsX86 && (Name == "sse.mul.ss" || Name == "sse2.mul.sd")) { 965 Type *I32Ty = Type::getInt32Ty(C); 966 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 967 ConstantInt::get(I32Ty, 0)); 968 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 969 ConstantInt::get(I32Ty, 0)); 970 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), 971 Builder.CreateFMul(Elt0, Elt1), 972 ConstantInt::get(I32Ty, 0)); 973 } else if (IsX86 && (Name == "sse.div.ss" || Name == "sse2.div.sd")) { 974 Type *I32Ty = Type::getInt32Ty(C); 975 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 976 ConstantInt::get(I32Ty, 0)); 977 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 978 ConstantInt::get(I32Ty, 0)); 979 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), 980 Builder.CreateFDiv(Elt0, Elt1), 981 ConstantInt::get(I32Ty, 0)); 982 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) { 983 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt." 984 bool CmpEq = Name[16] == 'e'; 985 Rep = upgradeMaskedCompare(Builder, *CI, 986 CmpEq ? ICmpInst::ICMP_EQ 987 : ICmpInst::ICMP_SGT); 988 } else if (IsX86 && (Name == "sse41.pmaxsb" || 989 Name == "sse2.pmaxs.w" || 990 Name == "sse41.pmaxsd" || 991 Name.startswith("avx2.pmaxs") || 992 Name.startswith("avx512.mask.pmaxs"))) { 993 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT); 994 } else if (IsX86 && (Name == "sse2.pmaxu.b" || 995 Name == "sse41.pmaxuw" || 996 Name == "sse41.pmaxud" || 997 Name.startswith("avx2.pmaxu") || 998 Name.startswith("avx512.mask.pmaxu"))) { 999 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT); 1000 } else if (IsX86 && (Name == "sse41.pminsb" || 1001 Name == "sse2.pmins.w" || 1002 Name == "sse41.pminsd" || 1003 Name.startswith("avx2.pmins") || 1004 Name.startswith("avx512.mask.pmins"))) { 1005 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT); 1006 } else if (IsX86 && (Name == "sse2.pminu.b" || 1007 Name == "sse41.pminuw" || 1008 Name == "sse41.pminud" || 1009 Name.startswith("avx2.pminu") || 1010 Name.startswith("avx512.mask.pminu"))) { 1011 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT); 1012 } else if (IsX86 && (Name == "sse2.cvtdq2pd" || 1013 Name == "sse2.cvtps2pd" || 1014 Name == "avx.cvtdq2.pd.256" || 1015 Name == "avx.cvt.ps2.pd.256" || 1016 Name.startswith("avx512.mask.cvtdq2pd.") || 1017 Name.startswith("avx512.mask.cvtudq2pd."))) { 1018 // Lossless i32/float to double conversion. 1019 // Extract the bottom elements if necessary and convert to double vector. 1020 Value *Src = CI->getArgOperand(0); 1021 VectorType *SrcTy = cast<VectorType>(Src->getType()); 1022 VectorType *DstTy = cast<VectorType>(CI->getType()); 1023 Rep = CI->getArgOperand(0); 1024 1025 unsigned NumDstElts = DstTy->getNumElements(); 1026 if (NumDstElts < SrcTy->getNumElements()) { 1027 assert(NumDstElts == 2 && "Unexpected vector size"); 1028 uint32_t ShuffleMask[2] = { 0, 1 }; 1029 Rep = Builder.CreateShuffleVector(Rep, UndefValue::get(SrcTy), 1030 ShuffleMask); 1031 } 1032 1033 bool SInt2Double = (StringRef::npos != Name.find("cvtdq2")); 1034 bool UInt2Double = (StringRef::npos != Name.find("cvtudq2")); 1035 if (SInt2Double) 1036 Rep = Builder.CreateSIToFP(Rep, DstTy, "cvtdq2pd"); 1037 else if (UInt2Double) 1038 Rep = Builder.CreateUIToFP(Rep, DstTy, "cvtudq2pd"); 1039 else 1040 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd"); 1041 1042 if (CI->getNumArgOperands() == 3) 1043 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1044 CI->getArgOperand(1)); 1045 } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) { 1046 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 1047 CI->getArgOperand(1), CI->getArgOperand(2), 1048 /*Aligned*/false); 1049 } else if (IsX86 && (Name.startswith("avx512.mask.load."))) { 1050 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 1051 CI->getArgOperand(1),CI->getArgOperand(2), 1052 /*Aligned*/true); 1053 } else if (IsX86 && Name.startswith("xop.vpcom")) { 1054 Intrinsic::ID intID; 1055 if (Name.endswith("ub")) 1056 intID = Intrinsic::x86_xop_vpcomub; 1057 else if (Name.endswith("uw")) 1058 intID = Intrinsic::x86_xop_vpcomuw; 1059 else if (Name.endswith("ud")) 1060 intID = Intrinsic::x86_xop_vpcomud; 1061 else if (Name.endswith("uq")) 1062 intID = Intrinsic::x86_xop_vpcomuq; 1063 else if (Name.endswith("b")) 1064 intID = Intrinsic::x86_xop_vpcomb; 1065 else if (Name.endswith("w")) 1066 intID = Intrinsic::x86_xop_vpcomw; 1067 else if (Name.endswith("d")) 1068 intID = Intrinsic::x86_xop_vpcomd; 1069 else if (Name.endswith("q")) 1070 intID = Intrinsic::x86_xop_vpcomq; 1071 else 1072 llvm_unreachable("Unknown suffix"); 1073 1074 Name = Name.substr(9); // strip off "xop.vpcom" 1075 unsigned Imm; 1076 if (Name.startswith("lt")) 1077 Imm = 0; 1078 else if (Name.startswith("le")) 1079 Imm = 1; 1080 else if (Name.startswith("gt")) 1081 Imm = 2; 1082 else if (Name.startswith("ge")) 1083 Imm = 3; 1084 else if (Name.startswith("eq")) 1085 Imm = 4; 1086 else if (Name.startswith("ne")) 1087 Imm = 5; 1088 else if (Name.startswith("false")) 1089 Imm = 6; 1090 else if (Name.startswith("true")) 1091 Imm = 7; 1092 else 1093 llvm_unreachable("Unknown condition"); 1094 1095 Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID); 1096 Rep = 1097 Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1), 1098 Builder.getInt8(Imm)}); 1099 } else if (IsX86 && Name.startswith("xop.vpcmov")) { 1100 Value *Sel = CI->getArgOperand(2); 1101 Value *NotSel = Builder.CreateNot(Sel); 1102 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel); 1103 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel); 1104 Rep = Builder.CreateOr(Sel0, Sel1); 1105 } else if (IsX86 && Name == "sse42.crc32.64.8") { 1106 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(), 1107 Intrinsic::x86_sse42_crc32_32_8); 1108 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C)); 1109 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)}); 1110 Rep = Builder.CreateZExt(Rep, CI->getType(), ""); 1111 } else if (IsX86 && Name.startswith("avx.vbroadcast.s")) { 1112 // Replace broadcasts with a series of insertelements. 1113 Type *VecTy = CI->getType(); 1114 Type *EltTy = VecTy->getVectorElementType(); 1115 unsigned EltNum = VecTy->getVectorNumElements(); 1116 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0), 1117 EltTy->getPointerTo()); 1118 Value *Load = Builder.CreateLoad(EltTy, Cast); 1119 Type *I32Ty = Type::getInt32Ty(C); 1120 Rep = UndefValue::get(VecTy); 1121 for (unsigned I = 0; I < EltNum; ++I) 1122 Rep = Builder.CreateInsertElement(Rep, Load, 1123 ConstantInt::get(I32Ty, I)); 1124 } else if (IsX86 && (Name.startswith("sse41.pmovsx") || 1125 Name.startswith("sse41.pmovzx") || 1126 Name.startswith("avx2.pmovsx") || 1127 Name.startswith("avx2.pmovzx") || 1128 Name.startswith("avx512.mask.pmovsx") || 1129 Name.startswith("avx512.mask.pmovzx"))) { 1130 VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType()); 1131 VectorType *DstTy = cast<VectorType>(CI->getType()); 1132 unsigned NumDstElts = DstTy->getNumElements(); 1133 1134 // Extract a subvector of the first NumDstElts lanes and sign/zero extend. 1135 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 1136 for (unsigned i = 0; i != NumDstElts; ++i) 1137 ShuffleMask[i] = i; 1138 1139 Value *SV = Builder.CreateShuffleVector( 1140 CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask); 1141 1142 bool DoSext = (StringRef::npos != Name.find("pmovsx")); 1143 Rep = DoSext ? Builder.CreateSExt(SV, DstTy) 1144 : Builder.CreateZExt(SV, DstTy); 1145 // If there are 3 arguments, it's a masked intrinsic so we need a select. 1146 if (CI->getNumArgOperands() == 3) 1147 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1148 CI->getArgOperand(1)); 1149 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") || 1150 Name == "avx2.vbroadcasti128")) { 1151 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle. 1152 Type *EltTy = CI->getType()->getVectorElementType(); 1153 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits(); 1154 Type *VT = VectorType::get(EltTy, NumSrcElts); 1155 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0), 1156 PointerType::getUnqual(VT)); 1157 Value *Load = Builder.CreateAlignedLoad(Op, 1); 1158 if (NumSrcElts == 2) 1159 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 1160 { 0, 1, 0, 1 }); 1161 else 1162 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 1163 { 0, 1, 2, 3, 0, 1, 2, 3 }); 1164 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") || 1165 Name.startswith("avx2.vbroadcast") || 1166 Name.startswith("avx512.pbroadcast") || 1167 Name.startswith("avx512.mask.broadcast.s"))) { 1168 // Replace vp?broadcasts with a vector shuffle. 1169 Value *Op = CI->getArgOperand(0); 1170 unsigned NumElts = CI->getType()->getVectorNumElements(); 1171 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts); 1172 Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()), 1173 Constant::getNullValue(MaskTy)); 1174 1175 if (CI->getNumArgOperands() == 3) 1176 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1177 CI->getArgOperand(1)); 1178 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) { 1179 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 1180 CI->getArgOperand(1), 1181 CI->getArgOperand(2), 1182 CI->getArgOperand(3), 1183 CI->getArgOperand(4), 1184 false); 1185 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) { 1186 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 1187 CI->getArgOperand(1), 1188 CI->getArgOperand(2), 1189 CI->getArgOperand(3), 1190 CI->getArgOperand(4), 1191 true); 1192 } else if (IsX86 && (Name == "sse2.psll.dq" || 1193 Name == "avx2.psll.dq")) { 1194 // 128/256-bit shift left specified in bits. 1195 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1196 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), 1197 Shift / 8); // Shift is in bits. 1198 } else if (IsX86 && (Name == "sse2.psrl.dq" || 1199 Name == "avx2.psrl.dq")) { 1200 // 128/256-bit shift right specified in bits. 1201 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1202 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), 1203 Shift / 8); // Shift is in bits. 1204 } else if (IsX86 && (Name == "sse2.psll.dq.bs" || 1205 Name == "avx2.psll.dq.bs" || 1206 Name == "avx512.psll.dq.512")) { 1207 // 128/256/512-bit shift left specified in bytes. 1208 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1209 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift); 1210 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" || 1211 Name == "avx2.psrl.dq.bs" || 1212 Name == "avx512.psrl.dq.512")) { 1213 // 128/256/512-bit shift right specified in bytes. 1214 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1215 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift); 1216 } else if (IsX86 && (Name == "sse41.pblendw" || 1217 Name.startswith("sse41.blendp") || 1218 Name.startswith("avx.blend.p") || 1219 Name == "avx2.pblendw" || 1220 Name.startswith("avx2.pblendd."))) { 1221 Value *Op0 = CI->getArgOperand(0); 1222 Value *Op1 = CI->getArgOperand(1); 1223 unsigned Imm = cast <ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1224 VectorType *VecTy = cast<VectorType>(CI->getType()); 1225 unsigned NumElts = VecTy->getNumElements(); 1226 1227 SmallVector<uint32_t, 16> Idxs(NumElts); 1228 for (unsigned i = 0; i != NumElts; ++i) 1229 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i; 1230 1231 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 1232 } else if (IsX86 && (Name.startswith("avx.vinsertf128.") || 1233 Name == "avx2.vinserti128" || 1234 Name.startswith("avx512.mask.insert"))) { 1235 Value *Op0 = CI->getArgOperand(0); 1236 Value *Op1 = CI->getArgOperand(1); 1237 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1238 unsigned DstNumElts = CI->getType()->getVectorNumElements(); 1239 unsigned SrcNumElts = Op1->getType()->getVectorNumElements(); 1240 unsigned Scale = DstNumElts / SrcNumElts; 1241 1242 // Mask off the high bits of the immediate value; hardware ignores those. 1243 Imm = Imm % Scale; 1244 1245 // Extend the second operand into a vector the size of the destination. 1246 Value *UndefV = UndefValue::get(Op1->getType()); 1247 SmallVector<uint32_t, 8> Idxs(DstNumElts); 1248 for (unsigned i = 0; i != SrcNumElts; ++i) 1249 Idxs[i] = i; 1250 for (unsigned i = SrcNumElts; i != DstNumElts; ++i) 1251 Idxs[i] = SrcNumElts; 1252 Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs); 1253 1254 // Insert the second operand into the first operand. 1255 1256 // Note that there is no guarantee that instruction lowering will actually 1257 // produce a vinsertf128 instruction for the created shuffles. In 1258 // particular, the 0 immediate case involves no lane changes, so it can 1259 // be handled as a blend. 1260 1261 // Example of shuffle mask for 32-bit elements: 1262 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11> 1263 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 > 1264 1265 // First fill with identify mask. 1266 for (unsigned i = 0; i != DstNumElts; ++i) 1267 Idxs[i] = i; 1268 // Then replace the elements where we need to insert. 1269 for (unsigned i = 0; i != SrcNumElts; ++i) 1270 Idxs[i + Imm * SrcNumElts] = i + DstNumElts; 1271 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs); 1272 1273 // If the intrinsic has a mask operand, handle that. 1274 if (CI->getNumArgOperands() == 5) 1275 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 1276 CI->getArgOperand(3)); 1277 } else if (IsX86 && (Name.startswith("avx.vextractf128.") || 1278 Name == "avx2.vextracti128" || 1279 Name.startswith("avx512.mask.vextract"))) { 1280 Value *Op0 = CI->getArgOperand(0); 1281 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1282 unsigned DstNumElts = CI->getType()->getVectorNumElements(); 1283 unsigned SrcNumElts = Op0->getType()->getVectorNumElements(); 1284 unsigned Scale = SrcNumElts / DstNumElts; 1285 1286 // Mask off the high bits of the immediate value; hardware ignores those. 1287 Imm = Imm % Scale; 1288 1289 // Get indexes for the subvector of the input vector. 1290 SmallVector<uint32_t, 8> Idxs(DstNumElts); 1291 for (unsigned i = 0; i != DstNumElts; ++i) { 1292 Idxs[i] = i + (Imm * DstNumElts); 1293 } 1294 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 1295 1296 // If the intrinsic has a mask operand, handle that. 1297 if (CI->getNumArgOperands() == 4) 1298 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1299 CI->getArgOperand(2)); 1300 } else if (!IsX86 && Name == "stackprotectorcheck") { 1301 Rep = nullptr; 1302 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") || 1303 Name.startswith("avx512.mask.perm.di."))) { 1304 Value *Op0 = CI->getArgOperand(0); 1305 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1306 VectorType *VecTy = cast<VectorType>(CI->getType()); 1307 unsigned NumElts = VecTy->getNumElements(); 1308 1309 SmallVector<uint32_t, 8> Idxs(NumElts); 1310 for (unsigned i = 0; i != NumElts; ++i) 1311 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3); 1312 1313 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 1314 1315 if (CI->getNumArgOperands() == 4) 1316 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1317 CI->getArgOperand(2)); 1318 } else if (IsX86 && (Name.startswith("avx.vpermil.") || 1319 Name == "sse2.pshuf.d" || 1320 Name.startswith("avx512.mask.vpermil.p") || 1321 Name.startswith("avx512.mask.pshuf.d."))) { 1322 Value *Op0 = CI->getArgOperand(0); 1323 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1324 VectorType *VecTy = cast<VectorType>(CI->getType()); 1325 unsigned NumElts = VecTy->getNumElements(); 1326 // Calculate the size of each index in the immediate. 1327 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits(); 1328 unsigned IdxMask = ((1 << IdxSize) - 1); 1329 1330 SmallVector<uint32_t, 8> Idxs(NumElts); 1331 // Lookup the bits for this element, wrapping around the immediate every 1332 // 8-bits. Elements are grouped into sets of 2 or 4 elements so we need 1333 // to offset by the first index of each group. 1334 for (unsigned i = 0; i != NumElts; ++i) 1335 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask); 1336 1337 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 1338 1339 if (CI->getNumArgOperands() == 4) 1340 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1341 CI->getArgOperand(2)); 1342 } else if (IsX86 && (Name == "sse2.pshufl.w" || 1343 Name.startswith("avx512.mask.pshufl.w."))) { 1344 Value *Op0 = CI->getArgOperand(0); 1345 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1346 unsigned NumElts = CI->getType()->getVectorNumElements(); 1347 1348 SmallVector<uint32_t, 16> Idxs(NumElts); 1349 for (unsigned l = 0; l != NumElts; l += 8) { 1350 for (unsigned i = 0; i != 4; ++i) 1351 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l; 1352 for (unsigned i = 4; i != 8; ++i) 1353 Idxs[i + l] = i + l; 1354 } 1355 1356 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 1357 1358 if (CI->getNumArgOperands() == 4) 1359 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1360 CI->getArgOperand(2)); 1361 } else if (IsX86 && (Name == "sse2.pshufh.w" || 1362 Name.startswith("avx512.mask.pshufh.w."))) { 1363 Value *Op0 = CI->getArgOperand(0); 1364 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1365 unsigned NumElts = CI->getType()->getVectorNumElements(); 1366 1367 SmallVector<uint32_t, 16> Idxs(NumElts); 1368 for (unsigned l = 0; l != NumElts; l += 8) { 1369 for (unsigned i = 0; i != 4; ++i) 1370 Idxs[i + l] = i + l; 1371 for (unsigned i = 0; i != 4; ++i) 1372 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l; 1373 } 1374 1375 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 1376 1377 if (CI->getNumArgOperands() == 4) 1378 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1379 CI->getArgOperand(2)); 1380 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) { 1381 Value *Op0 = CI->getArgOperand(0); 1382 Value *Op1 = CI->getArgOperand(1); 1383 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1384 unsigned NumElts = CI->getType()->getVectorNumElements(); 1385 1386 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 1387 unsigned HalfLaneElts = NumLaneElts / 2; 1388 1389 SmallVector<uint32_t, 16> Idxs(NumElts); 1390 for (unsigned i = 0; i != NumElts; ++i) { 1391 // Base index is the starting element of the lane. 1392 Idxs[i] = i - (i % NumLaneElts); 1393 // If we are half way through the lane switch to the other source. 1394 if ((i % NumLaneElts) >= HalfLaneElts) 1395 Idxs[i] += NumElts; 1396 // Now select the specific element. By adding HalfLaneElts bits from 1397 // the immediate. Wrapping around the immediate every 8-bits. 1398 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1); 1399 } 1400 1401 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 1402 1403 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 1404 CI->getArgOperand(3)); 1405 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") || 1406 Name.startswith("avx512.mask.movshdup") || 1407 Name.startswith("avx512.mask.movsldup"))) { 1408 Value *Op0 = CI->getArgOperand(0); 1409 unsigned NumElts = CI->getType()->getVectorNumElements(); 1410 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 1411 1412 unsigned Offset = 0; 1413 if (Name.startswith("avx512.mask.movshdup.")) 1414 Offset = 1; 1415 1416 SmallVector<uint32_t, 16> Idxs(NumElts); 1417 for (unsigned l = 0; l != NumElts; l += NumLaneElts) 1418 for (unsigned i = 0; i != NumLaneElts; i += 2) { 1419 Idxs[i + l + 0] = i + l + Offset; 1420 Idxs[i + l + 1] = i + l + Offset; 1421 } 1422 1423 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 1424 1425 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1426 CI->getArgOperand(1)); 1427 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") || 1428 Name.startswith("avx512.mask.unpckl."))) { 1429 Value *Op0 = CI->getArgOperand(0); 1430 Value *Op1 = CI->getArgOperand(1); 1431 int NumElts = CI->getType()->getVectorNumElements(); 1432 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 1433 1434 SmallVector<uint32_t, 64> Idxs(NumElts); 1435 for (int l = 0; l != NumElts; l += NumLaneElts) 1436 for (int i = 0; i != NumLaneElts; ++i) 1437 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2); 1438 1439 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 1440 1441 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1442 CI->getArgOperand(2)); 1443 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") || 1444 Name.startswith("avx512.mask.unpckh."))) { 1445 Value *Op0 = CI->getArgOperand(0); 1446 Value *Op1 = CI->getArgOperand(1); 1447 int NumElts = CI->getType()->getVectorNumElements(); 1448 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 1449 1450 SmallVector<uint32_t, 64> Idxs(NumElts); 1451 for (int l = 0; l != NumElts; l += NumLaneElts) 1452 for (int i = 0; i != NumLaneElts; ++i) 1453 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2); 1454 1455 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 1456 1457 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1458 CI->getArgOperand(2)); 1459 } else if (IsX86 && Name.startswith("avx512.mask.pand.")) { 1460 Rep = Builder.CreateAnd(CI->getArgOperand(0), CI->getArgOperand(1)); 1461 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1462 CI->getArgOperand(2)); 1463 } else if (IsX86 && Name.startswith("avx512.mask.pandn.")) { 1464 Rep = Builder.CreateAnd(Builder.CreateNot(CI->getArgOperand(0)), 1465 CI->getArgOperand(1)); 1466 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1467 CI->getArgOperand(2)); 1468 } else if (IsX86 && Name.startswith("avx512.mask.por.")) { 1469 Rep = Builder.CreateOr(CI->getArgOperand(0), CI->getArgOperand(1)); 1470 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1471 CI->getArgOperand(2)); 1472 } else if (IsX86 && Name.startswith("avx512.mask.pxor.")) { 1473 Rep = Builder.CreateXor(CI->getArgOperand(0), CI->getArgOperand(1)); 1474 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1475 CI->getArgOperand(2)); 1476 } else if (IsX86 && Name.startswith("avx512.mask.and.")) { 1477 VectorType *FTy = cast<VectorType>(CI->getType()); 1478 VectorType *ITy = VectorType::getInteger(FTy); 1479 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 1480 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 1481 Rep = Builder.CreateBitCast(Rep, FTy); 1482 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1483 CI->getArgOperand(2)); 1484 } else if (IsX86 && Name.startswith("avx512.mask.andn.")) { 1485 VectorType *FTy = cast<VectorType>(CI->getType()); 1486 VectorType *ITy = VectorType::getInteger(FTy); 1487 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy)); 1488 Rep = Builder.CreateAnd(Rep, 1489 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 1490 Rep = Builder.CreateBitCast(Rep, FTy); 1491 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1492 CI->getArgOperand(2)); 1493 } else if (IsX86 && Name.startswith("avx512.mask.or.")) { 1494 VectorType *FTy = cast<VectorType>(CI->getType()); 1495 VectorType *ITy = VectorType::getInteger(FTy); 1496 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 1497 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 1498 Rep = Builder.CreateBitCast(Rep, FTy); 1499 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1500 CI->getArgOperand(2)); 1501 } else if (IsX86 && Name.startswith("avx512.mask.xor.")) { 1502 VectorType *FTy = cast<VectorType>(CI->getType()); 1503 VectorType *ITy = VectorType::getInteger(FTy); 1504 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 1505 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 1506 Rep = Builder.CreateBitCast(Rep, FTy); 1507 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1508 CI->getArgOperand(2)); 1509 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) { 1510 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 1511 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1512 CI->getArgOperand(2)); 1513 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) { 1514 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1)); 1515 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1516 CI->getArgOperand(2)); 1517 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) { 1518 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1)); 1519 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1520 CI->getArgOperand(2)); 1521 } else if (IsX86 && (Name.startswith("avx512.mask.add.p"))) { 1522 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 1523 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1524 CI->getArgOperand(2)); 1525 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) { 1526 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1)); 1527 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1528 CI->getArgOperand(2)); 1529 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) { 1530 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1)); 1531 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1532 CI->getArgOperand(2)); 1533 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) { 1534 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1)); 1535 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1536 CI->getArgOperand(2)); 1537 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) { 1538 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 1539 Intrinsic::ctlz, 1540 CI->getType()), 1541 { CI->getArgOperand(0), Builder.getInt1(false) }); 1542 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1543 CI->getArgOperand(1)); 1544 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") || 1545 Name.startswith("avx512.mask.min.p"))) { 1546 bool IsMin = Name[13] == 'i'; 1547 VectorType *VecTy = cast<VectorType>(CI->getType()); 1548 unsigned VecWidth = VecTy->getPrimitiveSizeInBits(); 1549 unsigned EltWidth = VecTy->getScalarSizeInBits(); 1550 Intrinsic::ID IID; 1551 if (!IsMin && VecWidth == 128 && EltWidth == 32) 1552 IID = Intrinsic::x86_sse_max_ps; 1553 else if (!IsMin && VecWidth == 128 && EltWidth == 64) 1554 IID = Intrinsic::x86_sse2_max_pd; 1555 else if (!IsMin && VecWidth == 256 && EltWidth == 32) 1556 IID = Intrinsic::x86_avx_max_ps_256; 1557 else if (!IsMin && VecWidth == 256 && EltWidth == 64) 1558 IID = Intrinsic::x86_avx_max_pd_256; 1559 else if (IsMin && VecWidth == 128 && EltWidth == 32) 1560 IID = Intrinsic::x86_sse_min_ps; 1561 else if (IsMin && VecWidth == 128 && EltWidth == 64) 1562 IID = Intrinsic::x86_sse2_min_pd; 1563 else if (IsMin && VecWidth == 256 && EltWidth == 32) 1564 IID = Intrinsic::x86_avx_min_ps_256; 1565 else if (IsMin && VecWidth == 256 && EltWidth == 64) 1566 IID = Intrinsic::x86_avx_min_pd_256; 1567 else 1568 llvm_unreachable("Unexpected intrinsic"); 1569 1570 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1571 { CI->getArgOperand(0), CI->getArgOperand(1) }); 1572 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1573 CI->getArgOperand(2)); 1574 } else if (IsX86 && Name.startswith("avx512.mask.pshuf.b.")) { 1575 VectorType *VecTy = cast<VectorType>(CI->getType()); 1576 Intrinsic::ID IID; 1577 if (VecTy->getPrimitiveSizeInBits() == 128) 1578 IID = Intrinsic::x86_ssse3_pshuf_b_128; 1579 else if (VecTy->getPrimitiveSizeInBits() == 256) 1580 IID = Intrinsic::x86_avx2_pshuf_b; 1581 else if (VecTy->getPrimitiveSizeInBits() == 512) 1582 IID = Intrinsic::x86_avx512_pshuf_b_512; 1583 else 1584 llvm_unreachable("Unexpected intrinsic"); 1585 1586 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1587 { CI->getArgOperand(0), CI->getArgOperand(1) }); 1588 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1589 CI->getArgOperand(2)); 1590 } else if (IsX86 && (Name.startswith("avx512.mask.pmul.dq.") || 1591 Name.startswith("avx512.mask.pmulu.dq."))) { 1592 bool IsUnsigned = Name[16] == 'u'; 1593 VectorType *VecTy = cast<VectorType>(CI->getType()); 1594 Intrinsic::ID IID; 1595 if (!IsUnsigned && VecTy->getPrimitiveSizeInBits() == 128) 1596 IID = Intrinsic::x86_sse41_pmuldq; 1597 else if (!IsUnsigned && VecTy->getPrimitiveSizeInBits() == 256) 1598 IID = Intrinsic::x86_avx2_pmul_dq; 1599 else if (!IsUnsigned && VecTy->getPrimitiveSizeInBits() == 512) 1600 IID = Intrinsic::x86_avx512_pmul_dq_512; 1601 else if (IsUnsigned && VecTy->getPrimitiveSizeInBits() == 128) 1602 IID = Intrinsic::x86_sse2_pmulu_dq; 1603 else if (IsUnsigned && VecTy->getPrimitiveSizeInBits() == 256) 1604 IID = Intrinsic::x86_avx2_pmulu_dq; 1605 else if (IsUnsigned && VecTy->getPrimitiveSizeInBits() == 512) 1606 IID = Intrinsic::x86_avx512_pmulu_dq_512; 1607 else 1608 llvm_unreachable("Unexpected intrinsic"); 1609 1610 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1611 { CI->getArgOperand(0), CI->getArgOperand(1) }); 1612 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1613 CI->getArgOperand(2)); 1614 } else if (IsX86 && Name.startswith("avx512.mask.pack")) { 1615 bool IsUnsigned = Name[16] == 'u'; 1616 bool IsDW = Name[18] == 'd'; 1617 VectorType *VecTy = cast<VectorType>(CI->getType()); 1618 Intrinsic::ID IID; 1619 if (!IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 128) 1620 IID = Intrinsic::x86_sse2_packsswb_128; 1621 else if (!IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 256) 1622 IID = Intrinsic::x86_avx2_packsswb; 1623 else if (!IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 512) 1624 IID = Intrinsic::x86_avx512_packsswb_512; 1625 else if (!IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 128) 1626 IID = Intrinsic::x86_sse2_packssdw_128; 1627 else if (!IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 256) 1628 IID = Intrinsic::x86_avx2_packssdw; 1629 else if (!IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 512) 1630 IID = Intrinsic::x86_avx512_packssdw_512; 1631 else if (IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 128) 1632 IID = Intrinsic::x86_sse2_packuswb_128; 1633 else if (IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 256) 1634 IID = Intrinsic::x86_avx2_packuswb; 1635 else if (IsUnsigned && !IsDW && VecTy->getPrimitiveSizeInBits() == 512) 1636 IID = Intrinsic::x86_avx512_packuswb_512; 1637 else if (IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 128) 1638 IID = Intrinsic::x86_sse41_packusdw; 1639 else if (IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 256) 1640 IID = Intrinsic::x86_avx2_packusdw; 1641 else if (IsUnsigned && IsDW && VecTy->getPrimitiveSizeInBits() == 512) 1642 IID = Intrinsic::x86_avx512_packusdw_512; 1643 else 1644 llvm_unreachable("Unexpected intrinsic"); 1645 1646 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1647 { CI->getArgOperand(0), CI->getArgOperand(1) }); 1648 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1649 CI->getArgOperand(2)); 1650 } else if (IsX86 && Name.startswith("avx512.mask.psll")) { 1651 bool IsImmediate = Name[16] == 'i' || 1652 (Name.size() > 18 && Name[18] == 'i'); 1653 bool IsVariable = Name[16] == 'v'; 1654 char Size = Name[16] == '.' ? Name[17] : 1655 Name[17] == '.' ? Name[18] : 1656 Name[18] == '.' ? Name[19] : 1657 Name[20]; 1658 1659 Intrinsic::ID IID; 1660 if (IsVariable && Name[17] != '.') { 1661 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di 1662 IID = Intrinsic::x86_avx2_psllv_q; 1663 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di 1664 IID = Intrinsic::x86_avx2_psllv_q_256; 1665 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si 1666 IID = Intrinsic::x86_avx2_psllv_d; 1667 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si 1668 IID = Intrinsic::x86_avx2_psllv_d_256; 1669 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi 1670 IID = Intrinsic::x86_avx512_psllv_w_128; 1671 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi 1672 IID = Intrinsic::x86_avx512_psllv_w_256; 1673 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi 1674 IID = Intrinsic::x86_avx512_psllv_w_512; 1675 else 1676 llvm_unreachable("Unexpected size"); 1677 } else if (Name.endswith(".128")) { 1678 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128 1679 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d 1680 : Intrinsic::x86_sse2_psll_d; 1681 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128 1682 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q 1683 : Intrinsic::x86_sse2_psll_q; 1684 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128 1685 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w 1686 : Intrinsic::x86_sse2_psll_w; 1687 else 1688 llvm_unreachable("Unexpected size"); 1689 } else if (Name.endswith(".256")) { 1690 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256 1691 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d 1692 : Intrinsic::x86_avx2_psll_d; 1693 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256 1694 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q 1695 : Intrinsic::x86_avx2_psll_q; 1696 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256 1697 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w 1698 : Intrinsic::x86_avx2_psll_w; 1699 else 1700 llvm_unreachable("Unexpected size"); 1701 } else { 1702 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512 1703 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 : 1704 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 : 1705 Intrinsic::x86_avx512_psll_d_512; 1706 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512 1707 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 : 1708 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 : 1709 Intrinsic::x86_avx512_psll_q_512; 1710 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w 1711 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512 1712 : Intrinsic::x86_avx512_psll_w_512; 1713 else 1714 llvm_unreachable("Unexpected size"); 1715 } 1716 1717 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 1718 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) { 1719 bool IsImmediate = Name[16] == 'i' || 1720 (Name.size() > 18 && Name[18] == 'i'); 1721 bool IsVariable = Name[16] == 'v'; 1722 char Size = Name[16] == '.' ? Name[17] : 1723 Name[17] == '.' ? Name[18] : 1724 Name[18] == '.' ? Name[19] : 1725 Name[20]; 1726 1727 Intrinsic::ID IID; 1728 if (IsVariable && Name[17] != '.') { 1729 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di 1730 IID = Intrinsic::x86_avx2_psrlv_q; 1731 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di 1732 IID = Intrinsic::x86_avx2_psrlv_q_256; 1733 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si 1734 IID = Intrinsic::x86_avx2_psrlv_d; 1735 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si 1736 IID = Intrinsic::x86_avx2_psrlv_d_256; 1737 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi 1738 IID = Intrinsic::x86_avx512_psrlv_w_128; 1739 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi 1740 IID = Intrinsic::x86_avx512_psrlv_w_256; 1741 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi 1742 IID = Intrinsic::x86_avx512_psrlv_w_512; 1743 else 1744 llvm_unreachable("Unexpected size"); 1745 } else if (Name.endswith(".128")) { 1746 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128 1747 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d 1748 : Intrinsic::x86_sse2_psrl_d; 1749 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128 1750 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q 1751 : Intrinsic::x86_sse2_psrl_q; 1752 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128 1753 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w 1754 : Intrinsic::x86_sse2_psrl_w; 1755 else 1756 llvm_unreachable("Unexpected size"); 1757 } else if (Name.endswith(".256")) { 1758 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256 1759 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d 1760 : Intrinsic::x86_avx2_psrl_d; 1761 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256 1762 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q 1763 : Intrinsic::x86_avx2_psrl_q; 1764 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256 1765 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w 1766 : Intrinsic::x86_avx2_psrl_w; 1767 else 1768 llvm_unreachable("Unexpected size"); 1769 } else { 1770 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512 1771 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 : 1772 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 : 1773 Intrinsic::x86_avx512_psrl_d_512; 1774 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512 1775 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 : 1776 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 : 1777 Intrinsic::x86_avx512_psrl_q_512; 1778 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w) 1779 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512 1780 : Intrinsic::x86_avx512_psrl_w_512; 1781 else 1782 llvm_unreachable("Unexpected size"); 1783 } 1784 1785 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 1786 } else if (IsX86 && Name.startswith("avx512.mask.psra")) { 1787 bool IsImmediate = Name[16] == 'i' || 1788 (Name.size() > 18 && Name[18] == 'i'); 1789 bool IsVariable = Name[16] == 'v'; 1790 char Size = Name[16] == '.' ? Name[17] : 1791 Name[17] == '.' ? Name[18] : 1792 Name[18] == '.' ? Name[19] : 1793 Name[20]; 1794 1795 Intrinsic::ID IID; 1796 if (IsVariable && Name[17] != '.') { 1797 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si 1798 IID = Intrinsic::x86_avx2_psrav_d; 1799 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si 1800 IID = Intrinsic::x86_avx2_psrav_d_256; 1801 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi 1802 IID = Intrinsic::x86_avx512_psrav_w_128; 1803 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi 1804 IID = Intrinsic::x86_avx512_psrav_w_256; 1805 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi 1806 IID = Intrinsic::x86_avx512_psrav_w_512; 1807 else 1808 llvm_unreachable("Unexpected size"); 1809 } else if (Name.endswith(".128")) { 1810 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128 1811 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d 1812 : Intrinsic::x86_sse2_psra_d; 1813 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128 1814 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 : 1815 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 : 1816 Intrinsic::x86_avx512_psra_q_128; 1817 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128 1818 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w 1819 : Intrinsic::x86_sse2_psra_w; 1820 else 1821 llvm_unreachable("Unexpected size"); 1822 } else if (Name.endswith(".256")) { 1823 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256 1824 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d 1825 : Intrinsic::x86_avx2_psra_d; 1826 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256 1827 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 : 1828 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 : 1829 Intrinsic::x86_avx512_psra_q_256; 1830 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256 1831 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w 1832 : Intrinsic::x86_avx2_psra_w; 1833 else 1834 llvm_unreachable("Unexpected size"); 1835 } else { 1836 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512 1837 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 : 1838 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 : 1839 Intrinsic::x86_avx512_psra_d_512; 1840 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q 1841 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 : 1842 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 : 1843 Intrinsic::x86_avx512_psra_q_512; 1844 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w 1845 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512 1846 : Intrinsic::x86_avx512_psra_w_512; 1847 else 1848 llvm_unreachable("Unexpected size"); 1849 } 1850 1851 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 1852 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) { 1853 Rep = upgradeMaskedMove(Builder, *CI); 1854 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) { 1855 Rep = UpgradeMaskToInt(Builder, *CI); 1856 } else if (IsX86 && Name.startswith("avx512.mask.vpermilvar.")) { 1857 Intrinsic::ID IID; 1858 if (Name.endswith("ps.128")) 1859 IID = Intrinsic::x86_avx_vpermilvar_ps; 1860 else if (Name.endswith("pd.128")) 1861 IID = Intrinsic::x86_avx_vpermilvar_pd; 1862 else if (Name.endswith("ps.256")) 1863 IID = Intrinsic::x86_avx_vpermilvar_ps_256; 1864 else if (Name.endswith("pd.256")) 1865 IID = Intrinsic::x86_avx_vpermilvar_pd_256; 1866 else if (Name.endswith("ps.512")) 1867 IID = Intrinsic::x86_avx512_vpermilvar_ps_512; 1868 else if (Name.endswith("pd.512")) 1869 IID = Intrinsic::x86_avx512_vpermilvar_pd_512; 1870 else 1871 llvm_unreachable("Unexpected vpermilvar intrinsic"); 1872 1873 Function *Intrin = Intrinsic::getDeclaration(F->getParent(), IID); 1874 Rep = Builder.CreateCall(Intrin, 1875 { CI->getArgOperand(0), CI->getArgOperand(1) }); 1876 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 1877 CI->getArgOperand(2)); 1878 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) { 1879 Value *Arg = CI->getArgOperand(0); 1880 Value *Neg = Builder.CreateNeg(Arg, "neg"); 1881 Value *Cmp = Builder.CreateICmpSGE( 1882 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond"); 1883 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs"); 1884 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" || 1885 Name == "max.ui" || Name == "max.ull")) { 1886 Value *Arg0 = CI->getArgOperand(0); 1887 Value *Arg1 = CI->getArgOperand(1); 1888 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 1889 ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond") 1890 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond"); 1891 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max"); 1892 } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" || 1893 Name == "min.ui" || Name == "min.ull")) { 1894 Value *Arg0 = CI->getArgOperand(0); 1895 Value *Arg1 = CI->getArgOperand(1); 1896 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 1897 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond") 1898 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond"); 1899 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min"); 1900 } else if (IsNVVM && Name == "clz.ll") { 1901 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 and returns an i64. 1902 Value *Arg = CI->getArgOperand(0); 1903 Value *Ctlz = Builder.CreateCall( 1904 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, 1905 {Arg->getType()}), 1906 {Arg, Builder.getFalse()}, "ctlz"); 1907 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc"); 1908 } else if (IsNVVM && Name == "popc.ll") { 1909 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 and returns an 1910 // i64. 1911 Value *Arg = CI->getArgOperand(0); 1912 Value *Popc = Builder.CreateCall( 1913 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop, 1914 {Arg->getType()}), 1915 Arg, "ctpop"); 1916 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc"); 1917 } else if (IsNVVM && Name == "h2f") { 1918 Rep = Builder.CreateCall(Intrinsic::getDeclaration( 1919 F->getParent(), Intrinsic::convert_from_fp16, 1920 {Builder.getFloatTy()}), 1921 CI->getArgOperand(0), "h2f"); 1922 } else { 1923 llvm_unreachable("Unknown function for CallInst upgrade."); 1924 } 1925 1926 if (Rep) 1927 CI->replaceAllUsesWith(Rep); 1928 CI->eraseFromParent(); 1929 return; 1930 } 1931 1932 CallInst *NewCall = nullptr; 1933 switch (NewFn->getIntrinsicID()) { 1934 default: { 1935 // Handle generic mangling change, but nothing else 1936 assert( 1937 (CI->getCalledFunction()->getName() != NewFn->getName()) && 1938 "Unknown function for CallInst upgrade and isn't just a name change"); 1939 CI->setCalledFunction(NewFn); 1940 return; 1941 } 1942 1943 case Intrinsic::arm_neon_vld1: 1944 case Intrinsic::arm_neon_vld2: 1945 case Intrinsic::arm_neon_vld3: 1946 case Intrinsic::arm_neon_vld4: 1947 case Intrinsic::arm_neon_vld2lane: 1948 case Intrinsic::arm_neon_vld3lane: 1949 case Intrinsic::arm_neon_vld4lane: 1950 case Intrinsic::arm_neon_vst1: 1951 case Intrinsic::arm_neon_vst2: 1952 case Intrinsic::arm_neon_vst3: 1953 case Intrinsic::arm_neon_vst4: 1954 case Intrinsic::arm_neon_vst2lane: 1955 case Intrinsic::arm_neon_vst3lane: 1956 case Intrinsic::arm_neon_vst4lane: { 1957 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 1958 CI->arg_operands().end()); 1959 NewCall = Builder.CreateCall(NewFn, Args); 1960 break; 1961 } 1962 1963 case Intrinsic::bitreverse: 1964 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 1965 break; 1966 1967 case Intrinsic::ctlz: 1968 case Intrinsic::cttz: 1969 assert(CI->getNumArgOperands() == 1 && 1970 "Mismatch between function args and call args"); 1971 NewCall = 1972 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()}); 1973 break; 1974 1975 case Intrinsic::objectsize: { 1976 Value *NullIsUnknownSize = CI->getNumArgOperands() == 2 1977 ? Builder.getFalse() 1978 : CI->getArgOperand(2); 1979 NewCall = Builder.CreateCall( 1980 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize}); 1981 break; 1982 } 1983 1984 case Intrinsic::ctpop: 1985 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 1986 break; 1987 1988 case Intrinsic::convert_from_fp16: 1989 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 1990 break; 1991 1992 case Intrinsic::x86_xop_vfrcz_ss: 1993 case Intrinsic::x86_xop_vfrcz_sd: 1994 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)}); 1995 break; 1996 1997 case Intrinsic::x86_xop_vpermil2pd: 1998 case Intrinsic::x86_xop_vpermil2ps: 1999 case Intrinsic::x86_xop_vpermil2pd_256: 2000 case Intrinsic::x86_xop_vpermil2ps_256: { 2001 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 2002 CI->arg_operands().end()); 2003 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType()); 2004 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy); 2005 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy); 2006 NewCall = Builder.CreateCall(NewFn, Args); 2007 break; 2008 } 2009 2010 case Intrinsic::x86_sse41_ptestc: 2011 case Intrinsic::x86_sse41_ptestz: 2012 case Intrinsic::x86_sse41_ptestnzc: { 2013 // The arguments for these intrinsics used to be v4f32, and changed 2014 // to v2i64. This is purely a nop, since those are bitwise intrinsics. 2015 // So, the only thing required is a bitcast for both arguments. 2016 // First, check the arguments have the old type. 2017 Value *Arg0 = CI->getArgOperand(0); 2018 if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4)) 2019 return; 2020 2021 // Old intrinsic, add bitcasts 2022 Value *Arg1 = CI->getArgOperand(1); 2023 2024 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 2025 2026 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast"); 2027 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 2028 2029 NewCall = Builder.CreateCall(NewFn, {BC0, BC1}); 2030 break; 2031 } 2032 2033 case Intrinsic::x86_sse41_insertps: 2034 case Intrinsic::x86_sse41_dppd: 2035 case Intrinsic::x86_sse41_dpps: 2036 case Intrinsic::x86_sse41_mpsadbw: 2037 case Intrinsic::x86_avx_dp_ps_256: 2038 case Intrinsic::x86_avx2_mpsadbw: { 2039 // Need to truncate the last argument from i32 to i8 -- this argument models 2040 // an inherently 8-bit immediate operand to these x86 instructions. 2041 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 2042 CI->arg_operands().end()); 2043 2044 // Replace the last argument with a trunc. 2045 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc"); 2046 NewCall = Builder.CreateCall(NewFn, Args); 2047 break; 2048 } 2049 2050 case Intrinsic::thread_pointer: { 2051 NewCall = Builder.CreateCall(NewFn, {}); 2052 break; 2053 } 2054 2055 case Intrinsic::invariant_start: 2056 case Intrinsic::invariant_end: 2057 case Intrinsic::masked_load: 2058 case Intrinsic::masked_store: { 2059 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 2060 CI->arg_operands().end()); 2061 NewCall = Builder.CreateCall(NewFn, Args); 2062 break; 2063 } 2064 } 2065 assert(NewCall && "Should have either set this variable or returned through " 2066 "the default case"); 2067 std::string Name = CI->getName(); 2068 if (!Name.empty()) { 2069 CI->setName(Name + ".old"); 2070 NewCall->setName(Name); 2071 } 2072 CI->replaceAllUsesWith(NewCall); 2073 CI->eraseFromParent(); 2074 } 2075 2076 void llvm::UpgradeCallsToIntrinsic(Function *F) { 2077 assert(F && "Illegal attempt to upgrade a non-existent intrinsic."); 2078 2079 // Check if this function should be upgraded and get the replacement function 2080 // if there is one. 2081 Function *NewFn; 2082 if (UpgradeIntrinsicFunction(F, NewFn)) { 2083 // Replace all users of the old function with the new function or new 2084 // instructions. This is not a range loop because the call is deleted. 2085 for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; ) 2086 if (CallInst *CI = dyn_cast<CallInst>(*UI++)) 2087 UpgradeIntrinsicCall(CI, NewFn); 2088 2089 // Remove old function, no longer used, from the module. 2090 F->eraseFromParent(); 2091 } 2092 } 2093 2094 MDNode *llvm::UpgradeTBAANode(MDNode &MD) { 2095 // Check if the tag uses struct-path aware TBAA format. 2096 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3) 2097 return &MD; 2098 2099 auto &Context = MD.getContext(); 2100 if (MD.getNumOperands() == 3) { 2101 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)}; 2102 MDNode *ScalarType = MDNode::get(Context, Elts); 2103 // Create a MDNode <ScalarType, ScalarType, offset 0, const> 2104 Metadata *Elts2[] = {ScalarType, ScalarType, 2105 ConstantAsMetadata::get( 2106 Constant::getNullValue(Type::getInt64Ty(Context))), 2107 MD.getOperand(2)}; 2108 return MDNode::get(Context, Elts2); 2109 } 2110 // Create a MDNode <MD, MD, offset 0> 2111 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue( 2112 Type::getInt64Ty(Context)))}; 2113 return MDNode::get(Context, Elts); 2114 } 2115 2116 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy, 2117 Instruction *&Temp) { 2118 if (Opc != Instruction::BitCast) 2119 return nullptr; 2120 2121 Temp = nullptr; 2122 Type *SrcTy = V->getType(); 2123 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 2124 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 2125 LLVMContext &Context = V->getContext(); 2126 2127 // We have no information about target data layout, so we assume that 2128 // the maximum pointer size is 64bit. 2129 Type *MidTy = Type::getInt64Ty(Context); 2130 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy); 2131 2132 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy); 2133 } 2134 2135 return nullptr; 2136 } 2137 2138 Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) { 2139 if (Opc != Instruction::BitCast) 2140 return nullptr; 2141 2142 Type *SrcTy = C->getType(); 2143 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 2144 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 2145 LLVMContext &Context = C->getContext(); 2146 2147 // We have no information about target data layout, so we assume that 2148 // the maximum pointer size is 64bit. 2149 Type *MidTy = Type::getInt64Ty(Context); 2150 2151 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy), 2152 DestTy); 2153 } 2154 2155 return nullptr; 2156 } 2157 2158 /// Check the debug info version number, if it is out-dated, drop the debug 2159 /// info. Return true if module is modified. 2160 bool llvm::UpgradeDebugInfo(Module &M) { 2161 unsigned Version = getDebugMetadataVersionFromModule(M); 2162 if (Version == DEBUG_METADATA_VERSION) 2163 return false; 2164 2165 bool RetCode = StripDebugInfo(M); 2166 if (RetCode) { 2167 DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version); 2168 M.getContext().diagnose(DiagVersion); 2169 } 2170 return RetCode; 2171 } 2172 2173 bool llvm::UpgradeModuleFlags(Module &M) { 2174 const NamedMDNode *ModFlags = M.getModuleFlagsMetadata(); 2175 if (!ModFlags) 2176 return false; 2177 2178 bool HasObjCFlag = false, HasClassProperties = false; 2179 for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) { 2180 MDNode *Op = ModFlags->getOperand(I); 2181 if (Op->getNumOperands() < 2) 2182 continue; 2183 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1)); 2184 if (!ID) 2185 continue; 2186 if (ID->getString() == "Objective-C Image Info Version") 2187 HasObjCFlag = true; 2188 if (ID->getString() == "Objective-C Class Properties") 2189 HasClassProperties = true; 2190 } 2191 // "Objective-C Class Properties" is recently added for Objective-C. We 2192 // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module 2193 // flag of value 0, so we can correclty downgrade this flag when trying to 2194 // link an ObjC bitcode without this module flag with an ObjC bitcode with 2195 // this module flag. 2196 if (HasObjCFlag && !HasClassProperties) { 2197 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties", 2198 (uint32_t)0); 2199 return true; 2200 } 2201 return false; 2202 } 2203 2204 static bool isOldLoopArgument(Metadata *MD) { 2205 auto *T = dyn_cast_or_null<MDTuple>(MD); 2206 if (!T) 2207 return false; 2208 if (T->getNumOperands() < 1) 2209 return false; 2210 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0)); 2211 if (!S) 2212 return false; 2213 return S->getString().startswith("llvm.vectorizer."); 2214 } 2215 2216 static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) { 2217 StringRef OldPrefix = "llvm.vectorizer."; 2218 assert(OldTag.startswith(OldPrefix) && "Expected old prefix"); 2219 2220 if (OldTag == "llvm.vectorizer.unroll") 2221 return MDString::get(C, "llvm.loop.interleave.count"); 2222 2223 return MDString::get( 2224 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size())) 2225 .str()); 2226 } 2227 2228 static Metadata *upgradeLoopArgument(Metadata *MD) { 2229 auto *T = dyn_cast_or_null<MDTuple>(MD); 2230 if (!T) 2231 return MD; 2232 if (T->getNumOperands() < 1) 2233 return MD; 2234 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0)); 2235 if (!OldTag) 2236 return MD; 2237 if (!OldTag->getString().startswith("llvm.vectorizer.")) 2238 return MD; 2239 2240 // This has an old tag. Upgrade it. 2241 SmallVector<Metadata *, 8> Ops; 2242 Ops.reserve(T->getNumOperands()); 2243 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString())); 2244 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I) 2245 Ops.push_back(T->getOperand(I)); 2246 2247 return MDTuple::get(T->getContext(), Ops); 2248 } 2249 2250 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) { 2251 auto *T = dyn_cast<MDTuple>(&N); 2252 if (!T) 2253 return &N; 2254 2255 if (none_of(T->operands(), isOldLoopArgument)) 2256 return &N; 2257 2258 SmallVector<Metadata *, 8> Ops; 2259 Ops.reserve(T->getNumOperands()); 2260 for (Metadata *MD : T->operands()) 2261 Ops.push_back(upgradeLoopArgument(MD)); 2262 2263 return MDTuple::get(T->getContext(), Ops); 2264 } 2265