//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>
using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old, replace it with new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsic matches below should be marked with which llvm
  // version started autoupgrading them. At some point in the future we would
  // like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
74 if (Name=="ssse3.pabs.b.128" || // Added in 6.0 75 Name=="ssse3.pabs.w.128" || // Added in 6.0 76 Name=="ssse3.pabs.d.128" || // Added in 6.0 77 Name.startswith("fma.vfmsub.") || // Added in 7.0 78 Name.startswith("fma.vfmsubadd.") || // Added in 7.0 79 Name.startswith("fma.vfnmadd.") || // Added in 7.0 80 Name.startswith("fma.vfnmsub.") || // Added in 7.0 81 Name.startswith("avx512.mask.shuf.i") || // Added in 6.0 82 Name.startswith("avx512.mask.shuf.f") || // Added in 6.0 83 Name.startswith("avx512.kunpck") || //added in 6.0 84 Name.startswith("avx2.pabs.") || // Added in 6.0 85 Name.startswith("avx512.mask.pabs.") || // Added in 6.0 86 Name.startswith("avx512.broadcastm") || // Added in 6.0 87 Name == "sse.sqrt.ss" || // Added in 7.0 88 Name == "sse2.sqrt.sd" || // Added in 7.0 89 Name == "avx512.mask.sqrt.ps.128" || // Added in 7.0 90 Name == "avx512.mask.sqrt.ps.256" || // Added in 7.0 91 Name == "avx512.mask.sqrt.pd.128" || // Added in 7.0 92 Name == "avx512.mask.sqrt.pd.256" || // Added in 7.0 93 Name.startswith("avx.sqrt.p") || // Added in 7.0 94 Name.startswith("sse2.sqrt.p") || // Added in 7.0 95 Name.startswith("sse.sqrt.p") || // Added in 7.0 96 Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0 97 Name.startswith("sse2.pcmpeq.") || // Added in 3.1 98 Name.startswith("sse2.pcmpgt.") || // Added in 3.1 99 Name.startswith("avx2.pcmpeq.") || // Added in 3.1 100 Name.startswith("avx2.pcmpgt.") || // Added in 3.1 101 Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9 102 Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9 103 Name.startswith("avx.vperm2f128.") || // Added in 6.0 104 Name == "avx2.vperm2i128" || // Added in 6.0 105 Name == "sse.add.ss" || // Added in 4.0 106 Name == "sse2.add.sd" || // Added in 4.0 107 Name == "sse.sub.ss" || // Added in 4.0 108 Name == "sse2.sub.sd" || // Added in 4.0 109 Name == "sse.mul.ss" || // Added in 4.0 110 Name == "sse2.mul.sd" || // Added in 4.0 111 Name == "sse.div.ss" || // Added in 4.0 112 Name == "sse2.div.sd" || // Added in 4.0 113 Name == "sse41.pmaxsb" || // Added in 3.9 114 Name == "sse2.pmaxs.w" || // Added in 3.9 115 Name == "sse41.pmaxsd" || // Added in 3.9 116 Name == "sse2.pmaxu.b" || // Added in 3.9 117 Name == "sse41.pmaxuw" || // Added in 3.9 118 Name == "sse41.pmaxud" || // Added in 3.9 119 Name == "sse41.pminsb" || // Added in 3.9 120 Name == "sse2.pmins.w" || // Added in 3.9 121 Name == "sse41.pminsd" || // Added in 3.9 122 Name == "sse2.pminu.b" || // Added in 3.9 123 Name == "sse41.pminuw" || // Added in 3.9 124 Name == "sse41.pminud" || // Added in 3.9 125 Name == "avx512.kand.w" || // Added in 7.0 126 Name == "avx512.kandn.w" || // Added in 7.0 127 Name == "avx512.knot.w" || // Added in 7.0 128 Name == "avx512.kor.w" || // Added in 7.0 129 Name == "avx512.kxor.w" || // Added in 7.0 130 Name == "avx512.kxnor.w" || // Added in 7.0 131 Name == "avx512.kortestc.w" || // Added in 7.0 132 Name == "avx512.kortestz.w" || // Added in 7.0 133 Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0 134 Name.startswith("avx2.pmax") || // Added in 3.9 135 Name.startswith("avx2.pmin") || // Added in 3.9 136 Name.startswith("avx512.mask.pmax") || // Added in 4.0 137 Name.startswith("avx512.mask.pmin") || // Added in 4.0 138 Name.startswith("avx2.vbroadcast") || // Added in 3.8 139 Name.startswith("avx2.pbroadcast") || // Added in 3.8 140 Name.startswith("avx.vpermil.") || // Added in 3.1 141 Name.startswith("sse2.pshuf") || // Added in 3.9 142 Name.startswith("avx512.pbroadcast") || // Added in 3.9 143 
Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9 144 Name.startswith("avx512.mask.movddup") || // Added in 3.9 145 Name.startswith("avx512.mask.movshdup") || // Added in 3.9 146 Name.startswith("avx512.mask.movsldup") || // Added in 3.9 147 Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9 148 Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9 149 Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9 150 Name.startswith("avx512.mask.shuf.p") || // Added in 4.0 151 Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9 152 Name.startswith("avx512.mask.perm.df.") || // Added in 3.9 153 Name.startswith("avx512.mask.perm.di.") || // Added in 3.9 154 Name.startswith("avx512.mask.punpckl") || // Added in 3.9 155 Name.startswith("avx512.mask.punpckh") || // Added in 3.9 156 Name.startswith("avx512.mask.unpckl.") || // Added in 3.9 157 Name.startswith("avx512.mask.unpckh.") || // Added in 3.9 158 Name.startswith("avx512.mask.pand.") || // Added in 3.9 159 Name.startswith("avx512.mask.pandn.") || // Added in 3.9 160 Name.startswith("avx512.mask.por.") || // Added in 3.9 161 Name.startswith("avx512.mask.pxor.") || // Added in 3.9 162 Name.startswith("avx512.mask.and.") || // Added in 3.9 163 Name.startswith("avx512.mask.andn.") || // Added in 3.9 164 Name.startswith("avx512.mask.or.") || // Added in 3.9 165 Name.startswith("avx512.mask.xor.") || // Added in 3.9 166 Name.startswith("avx512.mask.padd.") || // Added in 4.0 167 Name.startswith("avx512.mask.psub.") || // Added in 4.0 168 Name.startswith("avx512.mask.pmull.") || // Added in 4.0 169 Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0 170 Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0 171 Name == "avx512.mask.cvtudq2ps.128" || // Added in 7.0 172 Name == "avx512.mask.cvtudq2ps.256" || // Added in 7.0 173 Name == "avx512.mask.cvtqq2pd.128" || // Added in 7.0 174 Name == "avx512.mask.cvtqq2pd.256" || // Added in 7.0 175 Name == "avx512.mask.cvtuqq2pd.128" || // Added in 7.0 176 Name == "avx512.mask.cvtuqq2pd.256" || // Added in 7.0 177 Name == "avx512.mask.cvtdq2ps.128" || // Added in 7.0 178 Name == "avx512.mask.cvtdq2ps.256" || // Added in 7.0 179 Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0 180 Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0 181 Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0 182 Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0 183 Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0 184 Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0 185 Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0 186 Name == "avx512.cvtusi2sd" || // Added in 7.0 187 Name.startswith("avx512.mask.permvar.") || // Added in 7.0 188 Name.startswith("avx512.mask.permvar.") || // Added in 7.0 189 Name == "sse2.pmulu.dq" || // Added in 7.0 190 Name == "sse41.pmuldq" || // Added in 7.0 191 Name == "avx2.pmulu.dq" || // Added in 7.0 192 Name == "avx2.pmul.dq" || // Added in 7.0 193 Name == "avx512.pmulu.dq.512" || // Added in 7.0 194 Name == "avx512.pmul.dq.512" || // Added in 7.0 195 Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0 196 Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0 197 Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0 198 Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0 199 Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0 200 Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0 201 Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0 202 
Name.startswith("avx512.mask.packsswb.") || // Added in 5.0 203 Name.startswith("avx512.mask.packssdw.") || // Added in 5.0 204 Name.startswith("avx512.mask.packuswb.") || // Added in 5.0 205 Name.startswith("avx512.mask.packusdw.") || // Added in 5.0 206 Name.startswith("avx512.mask.cmp.b") || // Added in 5.0 207 Name.startswith("avx512.mask.cmp.d") || // Added in 5.0 208 Name.startswith("avx512.mask.cmp.q") || // Added in 5.0 209 Name.startswith("avx512.mask.cmp.w") || // Added in 5.0 210 Name.startswith("avx512.mask.cmp.p") || // Added in 7.0 211 Name.startswith("avx512.mask.ucmp.") || // Added in 5.0 212 Name.startswith("avx512.cvtb2mask.") || // Added in 7.0 213 Name.startswith("avx512.cvtw2mask.") || // Added in 7.0 214 Name.startswith("avx512.cvtd2mask.") || // Added in 7.0 215 Name.startswith("avx512.cvtq2mask.") || // Added in 7.0 216 Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0 217 Name.startswith("avx512.mask.psll.d") || // Added in 4.0 218 Name.startswith("avx512.mask.psll.q") || // Added in 4.0 219 Name.startswith("avx512.mask.psll.w") || // Added in 4.0 220 Name.startswith("avx512.mask.psra.d") || // Added in 4.0 221 Name.startswith("avx512.mask.psra.q") || // Added in 4.0 222 Name.startswith("avx512.mask.psra.w") || // Added in 4.0 223 Name.startswith("avx512.mask.psrl.d") || // Added in 4.0 224 Name.startswith("avx512.mask.psrl.q") || // Added in 4.0 225 Name.startswith("avx512.mask.psrl.w") || // Added in 4.0 226 Name.startswith("avx512.mask.pslli") || // Added in 4.0 227 Name.startswith("avx512.mask.psrai") || // Added in 4.0 228 Name.startswith("avx512.mask.psrli") || // Added in 4.0 229 Name.startswith("avx512.mask.psllv") || // Added in 4.0 230 Name.startswith("avx512.mask.psrav") || // Added in 4.0 231 Name.startswith("avx512.mask.psrlv") || // Added in 4.0 232 Name.startswith("sse41.pmovsx") || // Added in 3.8 233 Name.startswith("sse41.pmovzx") || // Added in 3.9 234 Name.startswith("avx2.pmovsx") || // Added in 3.9 235 Name.startswith("avx2.pmovzx") || // Added in 3.9 236 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0 237 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0 238 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0 239 Name.startswith("avx512.mask.pternlog.") || // Added in 7.0 240 Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0 241 Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0 242 Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0 243 Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0 244 Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0 245 Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0 246 Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0 247 Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0 248 Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0 249 Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0 250 Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0 251 Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0 252 Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0 253 Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0 254 Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0 255 Name.startswith("avx512.mask.vpshld.") || // Added in 7.0 256 Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0 257 Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0 258 Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 
128/256 in 4.0 259 Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0 260 Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0 261 Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0 262 Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0 263 Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0 264 Name == "sse.cvtsi2ss" || // Added in 7.0 265 Name == "sse.cvtsi642ss" || // Added in 7.0 266 Name == "sse2.cvtsi2sd" || // Added in 7.0 267 Name == "sse2.cvtsi642sd" || // Added in 7.0 268 Name == "sse2.cvtss2sd" || // Added in 7.0 269 Name == "sse2.cvtdq2pd" || // Added in 3.9 270 Name == "sse2.cvtdq2ps" || // Added in 7.0 271 Name == "sse2.cvtps2pd" || // Added in 3.9 272 Name == "avx.cvtdq2.pd.256" || // Added in 3.9 273 Name == "avx.cvtdq2.ps.256" || // Added in 7.0 274 Name == "avx.cvt.ps2.pd.256" || // Added in 3.9 275 Name.startswith("avx.vinsertf128.") || // Added in 3.7 276 Name == "avx2.vinserti128" || // Added in 3.7 277 Name.startswith("avx512.mask.insert") || // Added in 4.0 278 Name.startswith("avx.vextractf128.") || // Added in 3.7 279 Name == "avx2.vextracti128" || // Added in 3.7 280 Name.startswith("avx512.mask.vextract") || // Added in 4.0 281 Name.startswith("sse4a.movnt.") || // Added in 3.9 282 Name.startswith("avx.movnt.") || // Added in 3.2 283 Name.startswith("avx512.storent.") || // Added in 3.9 284 Name == "sse41.movntdqa" || // Added in 5.0 285 Name == "avx2.movntdqa" || // Added in 5.0 286 Name == "avx512.movntdqa" || // Added in 5.0 287 Name == "sse2.storel.dq" || // Added in 3.9 288 Name.startswith("sse.storeu.") || // Added in 3.9 289 Name.startswith("sse2.storeu.") || // Added in 3.9 290 Name.startswith("avx.storeu.") || // Added in 3.9 291 Name.startswith("avx512.mask.storeu.") || // Added in 3.9 292 Name.startswith("avx512.mask.store.p") || // Added in 3.9 293 Name.startswith("avx512.mask.store.b.") || // Added in 3.9 294 Name.startswith("avx512.mask.store.w.") || // Added in 3.9 295 Name.startswith("avx512.mask.store.d.") || // Added in 3.9 296 Name.startswith("avx512.mask.store.q.") || // Added in 3.9 297 Name == "avx512.mask.store.ss" || // Added in 7.0 298 Name.startswith("avx512.mask.loadu.") || // Added in 3.9 299 Name.startswith("avx512.mask.load.") || // Added in 3.9 300 Name.startswith("avx512.mask.expand.load.") || // Added in 7.0 301 Name.startswith("avx512.mask.compress.store.") || // Added in 7.0 302 Name == "sse42.crc32.64.8" || // Added in 3.4 303 Name.startswith("avx.vbroadcast.s") || // Added in 3.5 304 Name.startswith("avx512.vbroadcast.s") || // Added in 7.0 305 Name.startswith("avx512.mask.palignr.") || // Added in 3.9 306 Name.startswith("avx512.mask.valign.") || // Added in 4.0 307 Name.startswith("sse2.psll.dq") || // Added in 3.7 308 Name.startswith("sse2.psrl.dq") || // Added in 3.7 309 Name.startswith("avx2.psll.dq") || // Added in 3.7 310 Name.startswith("avx2.psrl.dq") || // Added in 3.7 311 Name.startswith("avx512.psll.dq") || // Added in 3.9 312 Name.startswith("avx512.psrl.dq") || // Added in 3.9 313 Name == "sse41.pblendw" || // Added in 3.7 314 Name.startswith("sse41.blendp") || // Added in 3.7 315 Name.startswith("avx.blend.p") || // Added in 3.7 316 Name == "avx2.pblendw" || // Added in 3.7 317 Name.startswith("avx2.pblendd.") || // Added in 3.7 318 Name.startswith("avx.vbroadcastf128") || // Added in 4.0 319 Name == "avx2.vbroadcasti128" || // Added in 3.7 320 Name.startswith("avx512.mask.broadcastf") || // Added in 6.0 321 
Name.startswith("avx512.mask.broadcasti") || // Added in 6.0 322 Name == "xop.vpcmov" || // Added in 3.8 323 Name == "xop.vpcmov.256" || // Added in 5.0 324 Name.startswith("avx512.mask.move.s") || // Added in 4.0 325 Name.startswith("avx512.cvtmask2") || // Added in 5.0 326 (Name.startswith("xop.vpcom") && // Added in 3.2 327 F->arg_size() == 2) || 328 Name.startswith("avx512.ptestm") || //Added in 6.0 329 Name.startswith("avx512.ptestnm") || //Added in 6.0 330 Name.startswith("sse2.pavg") || // Added in 6.0 331 Name.startswith("avx2.pavg") || // Added in 6.0 332 Name.startswith("avx512.mask.pavg")) // Added in 6.0 333 return true; 334 335 return false; 336 } 337 338 static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name, 339 Function *&NewFn) { 340 // Only handle intrinsics that start with "x86.". 341 if (!Name.startswith("x86.")) 342 return false; 343 // Remove "x86." prefix. 344 Name = Name.substr(4); 345 346 if (ShouldUpgradeX86Intrinsic(F, Name)) { 347 NewFn = nullptr; 348 return true; 349 } 350 351 // SSE4.1 ptest functions may have an old signature. 352 if (Name.startswith("sse41.ptest")) { // Added in 3.2 353 if (Name.substr(11) == "c") 354 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn); 355 if (Name.substr(11) == "z") 356 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn); 357 if (Name.substr(11) == "nzc") 358 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn); 359 } 360 // Several blend and other instructions with masks used the wrong number of 361 // bits. 362 if (Name == "sse41.insertps") // Added in 3.6 363 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps, 364 NewFn); 365 if (Name == "sse41.dppd") // Added in 3.6 366 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd, 367 NewFn); 368 if (Name == "sse41.dpps") // Added in 3.6 369 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps, 370 NewFn); 371 if (Name == "sse41.mpsadbw") // Added in 3.6 372 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw, 373 NewFn); 374 if (Name == "avx.dp.ps.256") // Added in 3.6 375 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256, 376 NewFn); 377 if (Name == "avx2.mpsadbw") // Added in 3.6 378 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw, 379 NewFn); 380 381 // frcz.ss/sd may need to have an argument dropped. Added in 3.2 382 if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) { 383 rename(F); 384 NewFn = Intrinsic::getDeclaration(F->getParent(), 385 Intrinsic::x86_xop_vfrcz_ss); 386 return true; 387 } 388 if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) { 389 rename(F); 390 NewFn = Intrinsic::getDeclaration(F->getParent(), 391 Intrinsic::x86_xop_vfrcz_sd); 392 return true; 393 } 394 // Upgrade any XOP PERMIL2 index operand still using a float/double vector. 
395 if (Name.startswith("xop.vpermil2")) { // Added in 3.9 396 auto Idx = F->getFunctionType()->getParamType(2); 397 if (Idx->isFPOrFPVectorTy()) { 398 rename(F); 399 unsigned IdxSize = Idx->getPrimitiveSizeInBits(); 400 unsigned EltSize = Idx->getScalarSizeInBits(); 401 Intrinsic::ID Permil2ID; 402 if (EltSize == 64 && IdxSize == 128) 403 Permil2ID = Intrinsic::x86_xop_vpermil2pd; 404 else if (EltSize == 32 && IdxSize == 128) 405 Permil2ID = Intrinsic::x86_xop_vpermil2ps; 406 else if (EltSize == 64 && IdxSize == 256) 407 Permil2ID = Intrinsic::x86_xop_vpermil2pd_256; 408 else 409 Permil2ID = Intrinsic::x86_xop_vpermil2ps_256; 410 NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID); 411 return true; 412 } 413 } 414 415 return false; 416 } 417 418 static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) { 419 assert(F && "Illegal to upgrade a non-existent Function."); 420 421 // Quickly eliminate it, if it's not a candidate. 422 StringRef Name = F->getName(); 423 if (Name.size() <= 8 || !Name.startswith("llvm.")) 424 return false; 425 Name = Name.substr(5); // Strip off "llvm." 426 427 switch (Name[0]) { 428 default: break; 429 case 'a': { 430 if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) { 431 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse, 432 F->arg_begin()->getType()); 433 return true; 434 } 435 if (Name.startswith("arm.neon.vclz")) { 436 Type* args[2] = { 437 F->arg_begin()->getType(), 438 Type::getInt1Ty(F->getContext()) 439 }; 440 // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to 441 // the end of the name. Change name from llvm.arm.neon.vclz.* to 442 // llvm.ctlz.* 443 FunctionType* fType = FunctionType::get(F->getReturnType(), args, false); 444 NewFn = Function::Create(fType, F->getLinkage(), 445 "llvm.ctlz." + Name.substr(14), F->getParent()); 446 return true; 447 } 448 if (Name.startswith("arm.neon.vcnt")) { 449 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop, 450 F->arg_begin()->getType()); 451 return true; 452 } 453 Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$"); 454 if (vldRegex.match(Name)) { 455 auto fArgs = F->getFunctionType()->params(); 456 SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end()); 457 // Can't use Intrinsic::getDeclaration here as the return types might 458 // then only be structurally equal. 459 FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false); 460 NewFn = Function::Create(fType, F->getLinkage(), 461 "llvm." 
+ Name + ".p0i8", F->getParent()); 462 return true; 463 } 464 Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$"); 465 if (vstRegex.match(Name)) { 466 static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1, 467 Intrinsic::arm_neon_vst2, 468 Intrinsic::arm_neon_vst3, 469 Intrinsic::arm_neon_vst4}; 470 471 static const Intrinsic::ID StoreLaneInts[] = { 472 Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane, 473 Intrinsic::arm_neon_vst4lane 474 }; 475 476 auto fArgs = F->getFunctionType()->params(); 477 Type *Tys[] = {fArgs[0], fArgs[1]}; 478 if (Name.find("lane") == StringRef::npos) 479 NewFn = Intrinsic::getDeclaration(F->getParent(), 480 StoreInts[fArgs.size() - 3], Tys); 481 else 482 NewFn = Intrinsic::getDeclaration(F->getParent(), 483 StoreLaneInts[fArgs.size() - 5], Tys); 484 return true; 485 } 486 if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") { 487 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer); 488 return true; 489 } 490 break; 491 } 492 493 case 'c': { 494 if (Name.startswith("ctlz.") && F->arg_size() == 1) { 495 rename(F); 496 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, 497 F->arg_begin()->getType()); 498 return true; 499 } 500 if (Name.startswith("cttz.") && F->arg_size() == 1) { 501 rename(F); 502 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz, 503 F->arg_begin()->getType()); 504 return true; 505 } 506 break; 507 } 508 case 'd': { 509 if (Name == "dbg.value" && F->arg_size() == 4) { 510 rename(F); 511 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value); 512 return true; 513 } 514 break; 515 } 516 case 'i': 517 case 'l': { 518 bool IsLifetimeStart = Name.startswith("lifetime.start"); 519 if (IsLifetimeStart || Name.startswith("invariant.start")) { 520 Intrinsic::ID ID = IsLifetimeStart ? 521 Intrinsic::lifetime_start : Intrinsic::invariant_start; 522 auto Args = F->getFunctionType()->params(); 523 Type* ObjectPtr[1] = {Args[1]}; 524 if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) { 525 rename(F); 526 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr); 527 return true; 528 } 529 } 530 531 bool IsLifetimeEnd = Name.startswith("lifetime.end"); 532 if (IsLifetimeEnd || Name.startswith("invariant.end")) { 533 Intrinsic::ID ID = IsLifetimeEnd ? 534 Intrinsic::lifetime_end : Intrinsic::invariant_end; 535 536 auto Args = F->getFunctionType()->params(); 537 Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 
1 : 2]}; 538 if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) { 539 rename(F); 540 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr); 541 return true; 542 } 543 } 544 if (Name.startswith("invariant.group.barrier")) { 545 // Rename invariant.group.barrier to launder.invariant.group 546 auto Args = F->getFunctionType()->params(); 547 Type* ObjectPtr[1] = {Args[0]}; 548 rename(F); 549 NewFn = Intrinsic::getDeclaration(F->getParent(), 550 Intrinsic::launder_invariant_group, ObjectPtr); 551 return true; 552 553 } 554 555 break; 556 } 557 case 'm': { 558 if (Name.startswith("masked.load.")) { 559 Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() }; 560 if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) { 561 rename(F); 562 NewFn = Intrinsic::getDeclaration(F->getParent(), 563 Intrinsic::masked_load, 564 Tys); 565 return true; 566 } 567 } 568 if (Name.startswith("masked.store.")) { 569 auto Args = F->getFunctionType()->params(); 570 Type *Tys[] = { Args[0], Args[1] }; 571 if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) { 572 rename(F); 573 NewFn = Intrinsic::getDeclaration(F->getParent(), 574 Intrinsic::masked_store, 575 Tys); 576 return true; 577 } 578 } 579 // Renaming gather/scatter intrinsics with no address space overloading 580 // to the new overload which includes an address space 581 if (Name.startswith("masked.gather.")) { 582 Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()}; 583 if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) { 584 rename(F); 585 NewFn = Intrinsic::getDeclaration(F->getParent(), 586 Intrinsic::masked_gather, Tys); 587 return true; 588 } 589 } 590 if (Name.startswith("masked.scatter.")) { 591 auto Args = F->getFunctionType()->params(); 592 Type *Tys[] = {Args[0], Args[1]}; 593 if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) { 594 rename(F); 595 NewFn = Intrinsic::getDeclaration(F->getParent(), 596 Intrinsic::masked_scatter, Tys); 597 return true; 598 } 599 } 600 // Updating the memory intrinsics (memcpy/memmove/memset) that have an 601 // alignment parameter to embedding the alignment as an attribute of 602 // the pointer args. 603 if (Name.startswith("memcpy.") && F->arg_size() == 5) { 604 rename(F); 605 // Get the types of dest, src, and len 606 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3); 607 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy, 608 ParamTypes); 609 return true; 610 } 611 if (Name.startswith("memmove.") && F->arg_size() == 5) { 612 rename(F); 613 // Get the types of dest, src, and len 614 ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3); 615 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove, 616 ParamTypes); 617 return true; 618 } 619 if (Name.startswith("memset.") && F->arg_size() == 5) { 620 rename(F); 621 // Get the types of dest, and len 622 const auto *FT = F->getFunctionType(); 623 Type *ParamTypes[2] = { 624 FT->getParamType(0), // Dest 625 FT->getParamType(2) // len 626 }; 627 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset, 628 ParamTypes); 629 return true; 630 } 631 break; 632 } 633 case 'n': { 634 if (Name.startswith("nvvm.")) { 635 Name = Name.substr(5); 636 637 // The following nvvm intrinsics correspond exactly to an LLVM intrinsic. 
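      // For example, llvm.nvvm.brev32 maps directly to llvm.bitreverse.i32 and
      // llvm.nvvm.popc.i to llvm.ctpop.i32; the overload type is taken from
      // the return type below.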
      Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
                              .Cases("brev32", "brev64", Intrinsic::bitreverse)
                              .Case("clz.i", Intrinsic::ctlz)
                              .Case("popc.i", Intrinsic::ctpop)
                              .Default(Intrinsic::not_intrinsic);
      if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                          {F->getReturnType()});
        return true;
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
      // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = StringSwitch<bool>(Name)
                        .Cases("abs.i", "abs.ll", true)
                        .Cases("clz.ll", "popc.ll", "h2f", true)
                        .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
                        .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
                        .Default(false);
      if (Expand) {
        NewFn = nullptr;
        return true;
      }
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->arg_size() == 2 ||
          F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
                                          Tys);
        return true;
      }
    }
    break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x':
    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }
  // Remangle our intrinsic since we upgrade the mangling
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != None) {
    NewFn = Result.getValue();
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  // Nothing to do yet.
  return false;
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
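    // For example, a 128-bit vector (NumElts == 16) shifted left by 4 bytes
    // uses indices 12..15 (zero bytes from Res) followed by 16..27
    // (bytes 0..11 of Op).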
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
                     cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}

static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just return the first (true) operand.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the
// immediate, so we need to handle both cases. VALIGN also doesn't have
// 128-bit lanes.
static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
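  // VALIGND/Q interprets the immediate modulo the element count, so for a
  // v4i32 VALIGND only the low two bits of the shift are used.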
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that
  for (unsigned l = 0; l < NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  unsigned Align =
    Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Passthru->getType()));
  unsigned Align =
    Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
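  // As in the store case, a mask for fewer than 8 elements arrives as an i8
  // and getX86MaskVec keeps only its low NumElts bits.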
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
}

static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  llvm::Type *Ty = Op0->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
  Value *Neg = Builder.CreateNeg(Op0);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);

  if (CI.getNumArgOperands() == 3)
    Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));

  return Res;
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
  Type *Ty = CI.getType();

  // Arguments have a vXi32 type so cast to vXi64.
  Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
  Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = Builder.CreateShl(LHS, ShiftAmt);
    LHS = Builder.CreateAShr(LHS, ShiftAmt);
    RHS = Builder.CreateShl(RHS, ShiftAmt);
    RHS = Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = Builder.CreateAnd(LHS, Mask);
    RHS = Builder.CreateAnd(RHS, Mask);
  }

  Value *Res = Builder.CreateMul(LHS, RHS);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

// Apply a mask to a vector of i1s and make sure the result is at least 8 bits
// wide.
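// A v4i1 compare result, for instance, is widened to v8i1 by shuffling in
// false elements before it is bitcast back to an i8 mask.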
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask) {
  unsigned NumElts = Vec->getType()->getVectorNumElements();
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Vec = Builder.CreateShuffleVector(Vec,
                                      Constant::getNullValue(Vec->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
}

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   unsigned CC, bool Signed) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ; break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE; break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
  }

  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);

  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
                                    Intrinsic::ID IID) {
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
  Value *Rep = Builder.CreateCall(Intrin,
                                 { CI.getArgOperand(0), CI.getArgOperand(1) });
  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}

static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value* A = CI.getArgOperand(0);
  Value* B = CI.getArgOperand(1);
  Value* Src = CI.getArgOperand(2);
  Value* Mask = CI.getArgOperand(3);

  Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
  Value* Cmp = Builder.CreateIsNotNull(AndNode);
  Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
  Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
  Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}

static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
  Value* Op = CI.getArgOperand(0);
  Type* ReturnOp = CI.getType();
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
}

// Replace intrinsic with unmasked version and a select.
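// For example, llvm.x86.avx512.mask.max.ps.128 becomes a call to
// llvm.x86.sse.max.ps whose result is selected against the passthru operand
// using the mask operand.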
1053 static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder, 1054 CallInst &CI, Value *&Rep) { 1055 Name = Name.substr(12); // Remove avx512.mask. 1056 1057 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits(); 1058 unsigned EltWidth = CI.getType()->getScalarSizeInBits(); 1059 Intrinsic::ID IID; 1060 if (Name.startswith("max.p")) { 1061 if (VecWidth == 128 && EltWidth == 32) 1062 IID = Intrinsic::x86_sse_max_ps; 1063 else if (VecWidth == 128 && EltWidth == 64) 1064 IID = Intrinsic::x86_sse2_max_pd; 1065 else if (VecWidth == 256 && EltWidth == 32) 1066 IID = Intrinsic::x86_avx_max_ps_256; 1067 else if (VecWidth == 256 && EltWidth == 64) 1068 IID = Intrinsic::x86_avx_max_pd_256; 1069 else 1070 llvm_unreachable("Unexpected intrinsic"); 1071 } else if (Name.startswith("min.p")) { 1072 if (VecWidth == 128 && EltWidth == 32) 1073 IID = Intrinsic::x86_sse_min_ps; 1074 else if (VecWidth == 128 && EltWidth == 64) 1075 IID = Intrinsic::x86_sse2_min_pd; 1076 else if (VecWidth == 256 && EltWidth == 32) 1077 IID = Intrinsic::x86_avx_min_ps_256; 1078 else if (VecWidth == 256 && EltWidth == 64) 1079 IID = Intrinsic::x86_avx_min_pd_256; 1080 else 1081 llvm_unreachable("Unexpected intrinsic"); 1082 } else if (Name.startswith("pshuf.b.")) { 1083 if (VecWidth == 128) 1084 IID = Intrinsic::x86_ssse3_pshuf_b_128; 1085 else if (VecWidth == 256) 1086 IID = Intrinsic::x86_avx2_pshuf_b; 1087 else if (VecWidth == 512) 1088 IID = Intrinsic::x86_avx512_pshuf_b_512; 1089 else 1090 llvm_unreachable("Unexpected intrinsic"); 1091 } else if (Name.startswith("pmul.hr.sw.")) { 1092 if (VecWidth == 128) 1093 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128; 1094 else if (VecWidth == 256) 1095 IID = Intrinsic::x86_avx2_pmul_hr_sw; 1096 else if (VecWidth == 512) 1097 IID = Intrinsic::x86_avx512_pmul_hr_sw_512; 1098 else 1099 llvm_unreachable("Unexpected intrinsic"); 1100 } else if (Name.startswith("pmulh.w.")) { 1101 if (VecWidth == 128) 1102 IID = Intrinsic::x86_sse2_pmulh_w; 1103 else if (VecWidth == 256) 1104 IID = Intrinsic::x86_avx2_pmulh_w; 1105 else if (VecWidth == 512) 1106 IID = Intrinsic::x86_avx512_pmulh_w_512; 1107 else 1108 llvm_unreachable("Unexpected intrinsic"); 1109 } else if (Name.startswith("pmulhu.w.")) { 1110 if (VecWidth == 128) 1111 IID = Intrinsic::x86_sse2_pmulhu_w; 1112 else if (VecWidth == 256) 1113 IID = Intrinsic::x86_avx2_pmulhu_w; 1114 else if (VecWidth == 512) 1115 IID = Intrinsic::x86_avx512_pmulhu_w_512; 1116 else 1117 llvm_unreachable("Unexpected intrinsic"); 1118 } else if (Name.startswith("pmaddw.d.")) { 1119 if (VecWidth == 128) 1120 IID = Intrinsic::x86_sse2_pmadd_wd; 1121 else if (VecWidth == 256) 1122 IID = Intrinsic::x86_avx2_pmadd_wd; 1123 else if (VecWidth == 512) 1124 IID = Intrinsic::x86_avx512_pmaddw_d_512; 1125 else 1126 llvm_unreachable("Unexpected intrinsic"); 1127 } else if (Name.startswith("pmaddubs.w.")) { 1128 if (VecWidth == 128) 1129 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128; 1130 else if (VecWidth == 256) 1131 IID = Intrinsic::x86_avx2_pmadd_ub_sw; 1132 else if (VecWidth == 512) 1133 IID = Intrinsic::x86_avx512_pmaddubs_w_512; 1134 else 1135 llvm_unreachable("Unexpected intrinsic"); 1136 } else if (Name.startswith("packsswb.")) { 1137 if (VecWidth == 128) 1138 IID = Intrinsic::x86_sse2_packsswb_128; 1139 else if (VecWidth == 256) 1140 IID = Intrinsic::x86_avx2_packsswb; 1141 else if (VecWidth == 512) 1142 IID = Intrinsic::x86_avx512_packsswb_512; 1143 else 1144 llvm_unreachable("Unexpected intrinsic"); 1145 } else if (Name.startswith("packssdw.")) { 
1146 if (VecWidth == 128) 1147 IID = Intrinsic::x86_sse2_packssdw_128; 1148 else if (VecWidth == 256) 1149 IID = Intrinsic::x86_avx2_packssdw; 1150 else if (VecWidth == 512) 1151 IID = Intrinsic::x86_avx512_packssdw_512; 1152 else 1153 llvm_unreachable("Unexpected intrinsic"); 1154 } else if (Name.startswith("packuswb.")) { 1155 if (VecWidth == 128) 1156 IID = Intrinsic::x86_sse2_packuswb_128; 1157 else if (VecWidth == 256) 1158 IID = Intrinsic::x86_avx2_packuswb; 1159 else if (VecWidth == 512) 1160 IID = Intrinsic::x86_avx512_packuswb_512; 1161 else 1162 llvm_unreachable("Unexpected intrinsic"); 1163 } else if (Name.startswith("packusdw.")) { 1164 if (VecWidth == 128) 1165 IID = Intrinsic::x86_sse41_packusdw; 1166 else if (VecWidth == 256) 1167 IID = Intrinsic::x86_avx2_packusdw; 1168 else if (VecWidth == 512) 1169 IID = Intrinsic::x86_avx512_packusdw_512; 1170 else 1171 llvm_unreachable("Unexpected intrinsic"); 1172 } else if (Name.startswith("vpermilvar.")) { 1173 if (VecWidth == 128 && EltWidth == 32) 1174 IID = Intrinsic::x86_avx_vpermilvar_ps; 1175 else if (VecWidth == 128 && EltWidth == 64) 1176 IID = Intrinsic::x86_avx_vpermilvar_pd; 1177 else if (VecWidth == 256 && EltWidth == 32) 1178 IID = Intrinsic::x86_avx_vpermilvar_ps_256; 1179 else if (VecWidth == 256 && EltWidth == 64) 1180 IID = Intrinsic::x86_avx_vpermilvar_pd_256; 1181 else if (VecWidth == 512 && EltWidth == 32) 1182 IID = Intrinsic::x86_avx512_vpermilvar_ps_512; 1183 else if (VecWidth == 512 && EltWidth == 64) 1184 IID = Intrinsic::x86_avx512_vpermilvar_pd_512; 1185 else 1186 llvm_unreachable("Unexpected intrinsic"); 1187 } else if (Name == "cvtpd2dq.256") { 1188 IID = Intrinsic::x86_avx_cvt_pd2dq_256; 1189 } else if (Name == "cvtpd2ps.256") { 1190 IID = Intrinsic::x86_avx_cvt_pd2_ps_256; 1191 } else if (Name == "cvttpd2dq.256") { 1192 IID = Intrinsic::x86_avx_cvtt_pd2dq_256; 1193 } else if (Name == "cvttps2dq.128") { 1194 IID = Intrinsic::x86_sse2_cvttps2dq; 1195 } else if (Name == "cvttps2dq.256") { 1196 IID = Intrinsic::x86_avx_cvtt_ps2dq_256; 1197 } else if (Name.startswith("permvar.")) { 1198 bool IsFloat = CI.getType()->isFPOrFPVectorTy(); 1199 if (VecWidth == 256 && EltWidth == 32 && IsFloat) 1200 IID = Intrinsic::x86_avx2_permps; 1201 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat) 1202 IID = Intrinsic::x86_avx2_permd; 1203 else if (VecWidth == 256 && EltWidth == 64 && IsFloat) 1204 IID = Intrinsic::x86_avx512_permvar_df_256; 1205 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat) 1206 IID = Intrinsic::x86_avx512_permvar_di_256; 1207 else if (VecWidth == 512 && EltWidth == 32 && IsFloat) 1208 IID = Intrinsic::x86_avx512_permvar_sf_512; 1209 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat) 1210 IID = Intrinsic::x86_avx512_permvar_si_512; 1211 else if (VecWidth == 512 && EltWidth == 64 && IsFloat) 1212 IID = Intrinsic::x86_avx512_permvar_df_512; 1213 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat) 1214 IID = Intrinsic::x86_avx512_permvar_di_512; 1215 else if (VecWidth == 128 && EltWidth == 16) 1216 IID = Intrinsic::x86_avx512_permvar_hi_128; 1217 else if (VecWidth == 256 && EltWidth == 16) 1218 IID = Intrinsic::x86_avx512_permvar_hi_256; 1219 else if (VecWidth == 512 && EltWidth == 16) 1220 IID = Intrinsic::x86_avx512_permvar_hi_512; 1221 else if (VecWidth == 128 && EltWidth == 8) 1222 IID = Intrinsic::x86_avx512_permvar_qi_128; 1223 else if (VecWidth == 256 && EltWidth == 8) 1224 IID = Intrinsic::x86_avx512_permvar_qi_256; 1225 else if (VecWidth == 512 && EltWidth == 8) 1226 IID = 
Intrinsic::x86_avx512_permvar_qi_512; 1227 else 1228 llvm_unreachable("Unexpected intrinsic"); 1229 } else if (Name.startswith("dbpsadbw.")) { 1230 if (VecWidth == 128) 1231 IID = Intrinsic::x86_avx512_dbpsadbw_128; 1232 else if (VecWidth == 256) 1233 IID = Intrinsic::x86_avx512_dbpsadbw_256; 1234 else if (VecWidth == 512) 1235 IID = Intrinsic::x86_avx512_dbpsadbw_512; 1236 else 1237 llvm_unreachable("Unexpected intrinsic"); 1238 } else if (Name.startswith("vpshld.")) { 1239 if (VecWidth == 128 && Name[7] == 'q') 1240 IID = Intrinsic::x86_avx512_vpshld_q_128; 1241 else if (VecWidth == 128 && Name[7] == 'd') 1242 IID = Intrinsic::x86_avx512_vpshld_d_128; 1243 else if (VecWidth == 128 && Name[7] == 'w') 1244 IID = Intrinsic::x86_avx512_vpshld_w_128; 1245 else if (VecWidth == 256 && Name[7] == 'q') 1246 IID = Intrinsic::x86_avx512_vpshld_q_256; 1247 else if (VecWidth == 256 && Name[7] == 'd') 1248 IID = Intrinsic::x86_avx512_vpshld_d_256; 1249 else if (VecWidth == 256 && Name[7] == 'w') 1250 IID = Intrinsic::x86_avx512_vpshld_w_256; 1251 else if (VecWidth == 512 && Name[7] == 'q') 1252 IID = Intrinsic::x86_avx512_vpshld_q_512; 1253 else if (VecWidth == 512 && Name[7] == 'd') 1254 IID = Intrinsic::x86_avx512_vpshld_d_512; 1255 else if (VecWidth == 512 && Name[7] == 'w') 1256 IID = Intrinsic::x86_avx512_vpshld_w_512; 1257 else 1258 llvm_unreachable("Unexpected intrinsic"); 1259 } else if (Name.startswith("vpshrd.")) { 1260 if (VecWidth == 128 && Name[7] == 'q') 1261 IID = Intrinsic::x86_avx512_vpshrd_q_128; 1262 else if (VecWidth == 128 && Name[7] == 'd') 1263 IID = Intrinsic::x86_avx512_vpshrd_d_128; 1264 else if (VecWidth == 128 && Name[7] == 'w') 1265 IID = Intrinsic::x86_avx512_vpshrd_w_128; 1266 else if (VecWidth == 256 && Name[7] == 'q') 1267 IID = Intrinsic::x86_avx512_vpshrd_q_256; 1268 else if (VecWidth == 256 && Name[7] == 'd') 1269 IID = Intrinsic::x86_avx512_vpshrd_d_256; 1270 else if (VecWidth == 256 && Name[7] == 'w') 1271 IID = Intrinsic::x86_avx512_vpshrd_w_256; 1272 else if (VecWidth == 512 && Name[7] == 'q') 1273 IID = Intrinsic::x86_avx512_vpshrd_q_512; 1274 else if (VecWidth == 512 && Name[7] == 'd') 1275 IID = Intrinsic::x86_avx512_vpshrd_d_512; 1276 else if (VecWidth == 512 && Name[7] == 'w') 1277 IID = Intrinsic::x86_avx512_vpshrd_w_512; 1278 else 1279 llvm_unreachable("Unexpected intrinsic"); 1280 } else 1281 return false; 1282 1283 SmallVector<Value *, 4> Args(CI.arg_operands().begin(), 1284 CI.arg_operands().end()); 1285 Args.pop_back(); 1286 Args.pop_back(); 1287 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID), 1288 Args); 1289 unsigned NumArgs = CI.getNumArgOperands(); 1290 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep, 1291 CI.getArgOperand(NumArgs - 2)); 1292 return true; 1293 } 1294 1295 /// Upgrade comment in call to inline asm that represents an objc retain release 1296 /// marker. 1297 void llvm::UpgradeInlineAsmString(std::string *AsmStr) { 1298 size_t Pos; 1299 if (AsmStr->find("mov\tfp") == 0 && 1300 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos && 1301 (Pos = AsmStr->find("# marker")) != std::string::npos) { 1302 AsmStr->replace(Pos, 1, ";"); 1303 } 1304 return; 1305 } 1306 1307 /// Upgrade a call to an old intrinsic. All argument and return casting must be 1308 /// provided to seamlessly integrate with existing context. 
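/// If \p NewFn is null, the call is expanded in place into equivalent IR;
/// otherwise it is rewritten as a call to \p NewFn.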
1309 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { 1310 Function *F = CI->getCalledFunction(); 1311 LLVMContext &C = CI->getContext(); 1312 IRBuilder<> Builder(C); 1313 Builder.SetInsertPoint(CI->getParent(), CI->getIterator()); 1314 1315 assert(F && "Intrinsic call is not direct?"); 1316 1317 if (!NewFn) { 1318 // Get the Function's name. 1319 StringRef Name = F->getName(); 1320 1321 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'"); 1322 Name = Name.substr(5); 1323 1324 bool IsX86 = Name.startswith("x86."); 1325 if (IsX86) 1326 Name = Name.substr(4); 1327 bool IsNVVM = Name.startswith("nvvm."); 1328 if (IsNVVM) 1329 Name = Name.substr(5); 1330 1331 if (IsX86 && Name.startswith("sse4a.movnt.")) { 1332 Module *M = F->getParent(); 1333 SmallVector<Metadata *, 1> Elts; 1334 Elts.push_back( 1335 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1336 MDNode *Node = MDNode::get(C, Elts); 1337 1338 Value *Arg0 = CI->getArgOperand(0); 1339 Value *Arg1 = CI->getArgOperand(1); 1340 1341 // Nontemporal (unaligned) store of the 0'th element of the float/double 1342 // vector. 1343 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType(); 1344 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy); 1345 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast"); 1346 Value *Extract = 1347 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement"); 1348 1349 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1); 1350 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1351 1352 // Remove intrinsic. 1353 CI->eraseFromParent(); 1354 return; 1355 } 1356 1357 if (IsX86 && (Name.startswith("avx.movnt.") || 1358 Name.startswith("avx512.storent."))) { 1359 Module *M = F->getParent(); 1360 SmallVector<Metadata *, 1> Elts; 1361 Elts.push_back( 1362 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1363 MDNode *Node = MDNode::get(C, Elts); 1364 1365 Value *Arg0 = CI->getArgOperand(0); 1366 Value *Arg1 = CI->getArgOperand(1); 1367 1368 // Convert the type of the pointer to a pointer to the stored type. 1369 Value *BC = Builder.CreateBitCast(Arg0, 1370 PointerType::getUnqual(Arg1->getType()), 1371 "cast"); 1372 VectorType *VTy = cast<VectorType>(Arg1->getType()); 1373 StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, 1374 VTy->getBitWidth() / 8); 1375 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1376 1377 // Remove intrinsic. 1378 CI->eraseFromParent(); 1379 return; 1380 } 1381 1382 if (IsX86 && Name == "sse2.storel.dq") { 1383 Value *Arg0 = CI->getArgOperand(0); 1384 Value *Arg1 = CI->getArgOperand(1); 1385 1386 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 1387 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 1388 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0); 1389 Value *BC = Builder.CreateBitCast(Arg0, 1390 PointerType::getUnqual(Elt->getType()), 1391 "cast"); 1392 Builder.CreateAlignedStore(Elt, BC, 1); 1393 1394 // Remove intrinsic. 1395 CI->eraseFromParent(); 1396 return; 1397 } 1398 1399 if (IsX86 && (Name.startswith("sse.storeu.") || 1400 Name.startswith("sse2.storeu.") || 1401 Name.startswith("avx.storeu."))) { 1402 Value *Arg0 = CI->getArgOperand(0); 1403 Value *Arg1 = CI->getArgOperand(1); 1404 1405 Arg0 = Builder.CreateBitCast(Arg0, 1406 PointerType::getUnqual(Arg1->getType()), 1407 "cast"); 1408 Builder.CreateAlignedStore(Arg1, Arg0, 1); 1409 1410 // Remove intrinsic. 
1411 CI->eraseFromParent(); 1412 return; 1413 } 1414 1415 if (IsX86 && Name == "avx512.mask.store.ss") { 1416 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1)); 1417 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1), 1418 Mask, false); 1419 1420 // Remove intrinsic. 1421 CI->eraseFromParent(); 1422 return; 1423 } 1424 1425 if (IsX86 && (Name.startswith("avx512.mask.store"))) { 1426 // "avx512.mask.storeu." or "avx512.mask.store." 1427 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu". 1428 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1), 1429 CI->getArgOperand(2), Aligned); 1430 1431 // Remove intrinsic. 1432 CI->eraseFromParent(); 1433 return; 1434 } 1435 1436 Value *Rep; 1437 // Upgrade packed integer vector compare intrinsics to compare instructions. 1438 if (IsX86 && (Name.startswith("sse2.pcmp") || 1439 Name.startswith("avx2.pcmp"))) { 1440 // "sse2.pcmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt." 1441 bool CmpEq = Name[9] == 'e'; 1442 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT, 1443 CI->getArgOperand(0), CI->getArgOperand(1)); 1444 Rep = Builder.CreateSExt(Rep, CI->getType(), ""); 1445 } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) { 1446 Type *ExtTy = Type::getInt32Ty(C); 1447 if (CI->getOperand(0)->getType()->isIntegerTy(8)) 1448 ExtTy = Type::getInt64Ty(C); 1449 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() / 1450 ExtTy->getPrimitiveSizeInBits(); 1451 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy); 1452 Rep = Builder.CreateVectorSplat(NumElts, Rep); 1453 } else if (IsX86 && (Name == "sse.sqrt.ss" || 1454 Name == "sse2.sqrt.sd")) { 1455 Value *Vec = CI->getArgOperand(0); 1456 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0); 1457 Function *Intr = Intrinsic::getDeclaration(F->getParent(), 1458 Intrinsic::sqrt, Elt0->getType()); 1459 Elt0 = Builder.CreateCall(Intr, Elt0); 1460 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0); 1461 } else if (IsX86 && (Name.startswith("avx.sqrt.p") || 1462 Name.startswith("sse2.sqrt.p") || 1463 Name.startswith("sse.sqrt.p"))) { 1464 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 1465 Intrinsic::sqrt, 1466 CI->getType()), 1467 {CI->getArgOperand(0)}); 1468 } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p") && 1469 !Name.endswith("512"))) { 1470 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 1471 Intrinsic::sqrt, 1472 CI->getType()), 1473 {CI->getArgOperand(0)}); 1474 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1475 CI->getArgOperand(1)); 1476 } else if (IsX86 && (Name.startswith("avx512.ptestm") || 1477 Name.startswith("avx512.ptestnm"))) { 1478 Value *Op0 = CI->getArgOperand(0); 1479 Value *Op1 = CI->getArgOperand(1); 1480 Value *Mask = CI->getArgOperand(2); 1481 Rep = Builder.CreateAnd(Op0, Op1); 1482 llvm::Type *Ty = Op0->getType(); 1483 Value *Zero = llvm::Constant::getNullValue(Ty); 1484 ICmpInst::Predicate Pred = 1485 Name.startswith("avx512.ptestm") ?
ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ; 1486 Rep = Builder.CreateICmp(Pred, Rep, Zero); 1487 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask); 1488 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){ 1489 unsigned NumElts = 1490 CI->getArgOperand(1)->getType()->getVectorNumElements(); 1491 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0)); 1492 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1493 CI->getArgOperand(1)); 1494 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) { 1495 unsigned NumElts = CI->getType()->getScalarSizeInBits(); 1496 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts); 1497 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts); 1498 uint32_t Indices[64]; 1499 for (unsigned i = 0; i != NumElts; ++i) 1500 Indices[i] = i; 1501 1502 // First extract half of each vector. This gives better codegen than 1503 // doing it in a single shuffle. 1504 LHS = Builder.CreateShuffleVector(LHS, LHS, 1505 makeArrayRef(Indices, NumElts / 2)); 1506 RHS = Builder.CreateShuffleVector(RHS, RHS, 1507 makeArrayRef(Indices, NumElts / 2)); 1508 // Concat the vectors. 1509 // NOTE: Operands have to be swapped to match intrinsic definition. 1510 Rep = Builder.CreateShuffleVector(RHS, LHS, 1511 makeArrayRef(Indices, NumElts)); 1512 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1513 } else if (IsX86 && Name == "avx512.kand.w") { 1514 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1515 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1516 Rep = Builder.CreateAnd(LHS, RHS); 1517 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1518 } else if (IsX86 && Name == "avx512.kandn.w") { 1519 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1520 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1521 LHS = Builder.CreateNot(LHS); 1522 Rep = Builder.CreateAnd(LHS, RHS); 1523 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1524 } else if (IsX86 && Name == "avx512.kor.w") { 1525 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1526 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1527 Rep = Builder.CreateOr(LHS, RHS); 1528 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1529 } else if (IsX86 && Name == "avx512.kxor.w") { 1530 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1531 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1532 Rep = Builder.CreateXor(LHS, RHS); 1533 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1534 } else if (IsX86 && Name == "avx512.kxnor.w") { 1535 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1536 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1537 LHS = Builder.CreateNot(LHS); 1538 Rep = Builder.CreateXor(LHS, RHS); 1539 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1540 } else if (IsX86 && Name == "avx512.knot.w") { 1541 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1542 Rep = Builder.CreateNot(Rep); 1543 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1544 } else if (IsX86 && 1545 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) { 1546 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1547 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1548 Rep = Builder.CreateOr(LHS, RHS); 1549 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty()); 1550 Value *C; 1551 if (Name[14] == 'c') 1552 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty()); 1553 else 1554 C = ConstantInt::getNullValue(Builder.getInt16Ty()); 1555 Rep = 
Builder.CreateICmpEQ(Rep, C); 1556 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty()); 1557 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd")) { 1558 Type *I32Ty = Type::getInt32Ty(C); 1559 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1560 ConstantInt::get(I32Ty, 0)); 1561 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1562 ConstantInt::get(I32Ty, 0)); 1563 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), 1564 Builder.CreateFAdd(Elt0, Elt1), 1565 ConstantInt::get(I32Ty, 0)); 1566 } else if (IsX86 && (Name == "sse.sub.ss" || Name == "sse2.sub.sd")) { 1567 Type *I32Ty = Type::getInt32Ty(C); 1568 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1569 ConstantInt::get(I32Ty, 0)); 1570 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1571 ConstantInt::get(I32Ty, 0)); 1572 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), 1573 Builder.CreateFSub(Elt0, Elt1), 1574 ConstantInt::get(I32Ty, 0)); 1575 } else if (IsX86 && (Name == "sse.mul.ss" || Name == "sse2.mul.sd")) { 1576 Type *I32Ty = Type::getInt32Ty(C); 1577 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1578 ConstantInt::get(I32Ty, 0)); 1579 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1580 ConstantInt::get(I32Ty, 0)); 1581 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), 1582 Builder.CreateFMul(Elt0, Elt1), 1583 ConstantInt::get(I32Ty, 0)); 1584 } else if (IsX86 && (Name == "sse.div.ss" || Name == "sse2.div.sd")) { 1585 Type *I32Ty = Type::getInt32Ty(C); 1586 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1587 ConstantInt::get(I32Ty, 0)); 1588 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1589 ConstantInt::get(I32Ty, 0)); 1590 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), 1591 Builder.CreateFDiv(Elt0, Elt1), 1592 ConstantInt::get(I32Ty, 0)); 1593 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) { 1594 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt." 1595 bool CmpEq = Name[16] == 'e'; 1596 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 
0 : 6, true); 1597 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) { 1598 Type *OpTy = CI->getArgOperand(0)->getType(); 1599 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1600 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1601 Intrinsic::ID IID; 1602 if (VecWidth == 128 && EltWidth == 32) 1603 IID = Intrinsic::x86_avx512_fpclass_ps_128; 1604 else if (VecWidth == 256 && EltWidth == 32) 1605 IID = Intrinsic::x86_avx512_fpclass_ps_256; 1606 else if (VecWidth == 512 && EltWidth == 32) 1607 IID = Intrinsic::x86_avx512_fpclass_ps_512; 1608 else if (VecWidth == 128 && EltWidth == 64) 1609 IID = Intrinsic::x86_avx512_fpclass_pd_128; 1610 else if (VecWidth == 256 && EltWidth == 64) 1611 IID = Intrinsic::x86_avx512_fpclass_pd_256; 1612 else if (VecWidth == 512 && EltWidth == 64) 1613 IID = Intrinsic::x86_avx512_fpclass_pd_512; 1614 else 1615 llvm_unreachable("Unexpected intrinsic"); 1616 1617 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1618 { CI->getOperand(0), CI->getArgOperand(1) }); 1619 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1620 } else if (IsX86 && Name.startswith("avx512.mask.cmp.p")) { 1621 Type *OpTy = CI->getArgOperand(0)->getType(); 1622 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1623 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1624 Intrinsic::ID IID; 1625 if (VecWidth == 128 && EltWidth == 32) 1626 IID = Intrinsic::x86_avx512_cmp_ps_128; 1627 else if (VecWidth == 256 && EltWidth == 32) 1628 IID = Intrinsic::x86_avx512_cmp_ps_256; 1629 else if (VecWidth == 512 && EltWidth == 32) 1630 IID = Intrinsic::x86_avx512_cmp_ps_512; 1631 else if (VecWidth == 128 && EltWidth == 64) 1632 IID = Intrinsic::x86_avx512_cmp_pd_128; 1633 else if (VecWidth == 256 && EltWidth == 64) 1634 IID = Intrinsic::x86_avx512_cmp_pd_256; 1635 else if (VecWidth == 512 && EltWidth == 64) 1636 IID = Intrinsic::x86_avx512_cmp_pd_512; 1637 else 1638 llvm_unreachable("Unexpected intrinsic"); 1639 1640 SmallVector<Value *, 4> Args; 1641 Args.push_back(CI->getArgOperand(0)); 1642 Args.push_back(CI->getArgOperand(1)); 1643 Args.push_back(CI->getArgOperand(2)); 1644 if (CI->getNumArgOperands() == 5) 1645 Args.push_back(CI->getArgOperand(4)); 1646 1647 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1648 Args); 1649 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(3)); 1650 } else if (IsX86 && Name.startswith("avx512.mask.cmp.") && 1651 Name[16] != 'p') { 1652 // Integer compare intrinsics. 
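// The immediate follows the AVX-512 integer compare predicate encoding
// (0=eq, 1=lt, 2=le, 3=false, 4=ne, 5=nlt, 6=nle, 7=true), matching the
// 0/6 values used for pcmpeq/pcmpgt above; cmp passes Signed=true and the
// ucmp forms below pass Signed=false.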
1653 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1654 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true); 1655 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) { 1656 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1657 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false); 1658 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") || 1659 Name.startswith("avx512.cvtw2mask.") || 1660 Name.startswith("avx512.cvtd2mask.") || 1661 Name.startswith("avx512.cvtq2mask."))) { 1662 Value *Op = CI->getArgOperand(0); 1663 Value *Zero = llvm::Constant::getNullValue(Op->getType()); 1664 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero); 1665 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr); 1666 } else if(IsX86 && (Name == "ssse3.pabs.b.128" || 1667 Name == "ssse3.pabs.w.128" || 1668 Name == "ssse3.pabs.d.128" || 1669 Name.startswith("avx2.pabs") || 1670 Name.startswith("avx512.mask.pabs"))) { 1671 Rep = upgradeAbs(Builder, *CI); 1672 } else if (IsX86 && (Name == "sse41.pmaxsb" || 1673 Name == "sse2.pmaxs.w" || 1674 Name == "sse41.pmaxsd" || 1675 Name.startswith("avx2.pmaxs") || 1676 Name.startswith("avx512.mask.pmaxs"))) { 1677 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT); 1678 } else if (IsX86 && (Name == "sse2.pmaxu.b" || 1679 Name == "sse41.pmaxuw" || 1680 Name == "sse41.pmaxud" || 1681 Name.startswith("avx2.pmaxu") || 1682 Name.startswith("avx512.mask.pmaxu"))) { 1683 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT); 1684 } else if (IsX86 && (Name == "sse41.pminsb" || 1685 Name == "sse2.pmins.w" || 1686 Name == "sse41.pminsd" || 1687 Name.startswith("avx2.pmins") || 1688 Name.startswith("avx512.mask.pmins"))) { 1689 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT); 1690 } else if (IsX86 && (Name == "sse2.pminu.b" || 1691 Name == "sse41.pminuw" || 1692 Name == "sse41.pminud" || 1693 Name.startswith("avx2.pminu") || 1694 Name.startswith("avx512.mask.pminu"))) { 1695 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT); 1696 } else if (IsX86 && (Name == "sse2.pmulu.dq" || 1697 Name == "avx2.pmulu.dq" || 1698 Name == "avx512.pmulu.dq.512" || 1699 Name.startswith("avx512.mask.pmulu.dq."))) { 1700 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false); 1701 } else if (IsX86 && (Name == "sse41.pmuldq" || 1702 Name == "avx2.pmul.dq" || 1703 Name == "avx512.pmul.dq.512" || 1704 Name.startswith("avx512.mask.pmul.dq."))) { 1705 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true); 1706 } else if (IsX86 && (Name == "sse.cvtsi2ss" || 1707 Name == "sse2.cvtsi2sd" || 1708 Name == "sse.cvtsi642ss" || 1709 Name == "sse2.cvtsi642sd")) { 1710 Rep = Builder.CreateSIToFP(CI->getArgOperand(1), 1711 CI->getType()->getVectorElementType()); 1712 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1713 } else if (IsX86 && Name == "avx512.cvtusi2sd") { 1714 Rep = Builder.CreateUIToFP(CI->getArgOperand(1), 1715 CI->getType()->getVectorElementType()); 1716 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1717 } else if (IsX86 && Name == "sse2.cvtss2sd") { 1718 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0); 1719 Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType()); 1720 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1721 } else if (IsX86 && (Name == "sse2.cvtdq2pd" || 1722 Name == "sse2.cvtdq2ps" || 1723 Name == "avx.cvtdq2.pd.256" || 1724 Name == "avx.cvtdq2.ps.256" || 1725 Name.startswith("avx512.mask.cvtdq2pd.") 
|| 1726 Name.startswith("avx512.mask.cvtudq2pd.") || 1727 Name == "avx512.mask.cvtdq2ps.128" || 1728 Name == "avx512.mask.cvtdq2ps.256" || 1729 Name == "avx512.mask.cvtudq2ps.128" || 1730 Name == "avx512.mask.cvtudq2ps.256" || 1731 Name == "avx512.mask.cvtqq2pd.128" || 1732 Name == "avx512.mask.cvtqq2pd.256" || 1733 Name == "avx512.mask.cvtuqq2pd.128" || 1734 Name == "avx512.mask.cvtuqq2pd.256" || 1735 Name == "sse2.cvtps2pd" || 1736 Name == "avx.cvt.ps2.pd.256" || 1737 Name == "avx512.mask.cvtps2pd.128" || 1738 Name == "avx512.mask.cvtps2pd.256")) { 1739 Type *DstTy = CI->getType(); 1740 Rep = CI->getArgOperand(0); 1741 1742 unsigned NumDstElts = DstTy->getVectorNumElements(); 1743 if (NumDstElts < Rep->getType()->getVectorNumElements()) { 1744 assert(NumDstElts == 2 && "Unexpected vector size"); 1745 uint32_t ShuffleMask[2] = { 0, 1 }; 1746 Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); 1747 } 1748 1749 bool IsPS2PD = (StringRef::npos != Name.find("ps2")); 1750 bool IsUnsigned = (StringRef::npos != Name.find("cvtu")); 1751 if (IsPS2PD) 1752 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd"); 1753 else if (IsUnsigned) 1754 Rep = Builder.CreateUIToFP(Rep, DstTy, "cvt"); 1755 else 1756 Rep = Builder.CreateSIToFP(Rep, DstTy, "cvt"); 1757 1758 if (CI->getNumArgOperands() == 3) 1759 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1760 CI->getArgOperand(1)); 1761 } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) { 1762 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 1763 CI->getArgOperand(1), CI->getArgOperand(2), 1764 /*Aligned*/false); 1765 } else if (IsX86 && (Name.startswith("avx512.mask.load."))) { 1766 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 1767 CI->getArgOperand(1),CI->getArgOperand(2), 1768 /*Aligned*/true); 1769 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) { 1770 Type *ResultTy = CI->getType(); 1771 Type *PtrTy = ResultTy->getVectorElementType(); 1772 1773 // Cast the pointer to element type. 1774 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 1775 llvm::PointerType::getUnqual(PtrTy)); 1776 1777 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 1778 ResultTy->getVectorNumElements()); 1779 1780 Function *ELd = Intrinsic::getDeclaration(F->getParent(), 1781 Intrinsic::masked_expandload, 1782 ResultTy); 1783 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) }); 1784 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) { 1785 Type *ResultTy = CI->getArgOperand(1)->getType(); 1786 Type *PtrTy = ResultTy->getVectorElementType(); 1787 1788 // Cast the pointer to element type. 
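// The remaining operands are then mapped onto the target-independent
// llvm.masked.compressstore intrinsic: the data and pointer operands swap
// positions relative to the old signature, and the integer mask is expanded
// to a per-element vector mask by getX86MaskVec.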
1789 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 1790 llvm::PointerType::getUnqual(PtrTy)); 1791 1792 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 1793 ResultTy->getVectorNumElements()); 1794 1795 Function *CSt = Intrinsic::getDeclaration(F->getParent(), 1796 Intrinsic::masked_compressstore, 1797 ResultTy); 1798 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec }); 1799 } else if (IsX86 && Name.startswith("xop.vpcom")) { 1800 Intrinsic::ID intID; 1801 if (Name.endswith("ub")) 1802 intID = Intrinsic::x86_xop_vpcomub; 1803 else if (Name.endswith("uw")) 1804 intID = Intrinsic::x86_xop_vpcomuw; 1805 else if (Name.endswith("ud")) 1806 intID = Intrinsic::x86_xop_vpcomud; 1807 else if (Name.endswith("uq")) 1808 intID = Intrinsic::x86_xop_vpcomuq; 1809 else if (Name.endswith("b")) 1810 intID = Intrinsic::x86_xop_vpcomb; 1811 else if (Name.endswith("w")) 1812 intID = Intrinsic::x86_xop_vpcomw; 1813 else if (Name.endswith("d")) 1814 intID = Intrinsic::x86_xop_vpcomd; 1815 else if (Name.endswith("q")) 1816 intID = Intrinsic::x86_xop_vpcomq; 1817 else 1818 llvm_unreachable("Unknown suffix"); 1819 1820 Name = Name.substr(9); // strip off "xop.vpcom" 1821 unsigned Imm; 1822 if (Name.startswith("lt")) 1823 Imm = 0; 1824 else if (Name.startswith("le")) 1825 Imm = 1; 1826 else if (Name.startswith("gt")) 1827 Imm = 2; 1828 else if (Name.startswith("ge")) 1829 Imm = 3; 1830 else if (Name.startswith("eq")) 1831 Imm = 4; 1832 else if (Name.startswith("ne")) 1833 Imm = 5; 1834 else if (Name.startswith("false")) 1835 Imm = 6; 1836 else if (Name.startswith("true")) 1837 Imm = 7; 1838 else 1839 llvm_unreachable("Unknown condition"); 1840 1841 Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID); 1842 Rep = 1843 Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1), 1844 Builder.getInt8(Imm)}); 1845 } else if (IsX86 && Name.startswith("xop.vpcmov")) { 1846 Value *Sel = CI->getArgOperand(2); 1847 Value *NotSel = Builder.CreateNot(Sel); 1848 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel); 1849 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel); 1850 Rep = Builder.CreateOr(Sel0, Sel1); 1851 } else if (IsX86 && Name == "sse42.crc32.64.8") { 1852 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(), 1853 Intrinsic::x86_sse42_crc32_32_8); 1854 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C)); 1855 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)}); 1856 Rep = Builder.CreateZExt(Rep, CI->getType(), ""); 1857 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") || 1858 Name.startswith("avx512.vbroadcast.s"))) { 1859 // Replace broadcasts with a series of insertelements. 
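// E.g. for a <4 x float> destination this emits one scalar load followed by
// four insertelement instructions into an undef vector; later passes are
// free to fold the pattern into a splat.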
1860 Type *VecTy = CI->getType(); 1861 Type *EltTy = VecTy->getVectorElementType(); 1862 unsigned EltNum = VecTy->getVectorNumElements(); 1863 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0), 1864 EltTy->getPointerTo()); 1865 Value *Load = Builder.CreateLoad(EltTy, Cast); 1866 Type *I32Ty = Type::getInt32Ty(C); 1867 Rep = UndefValue::get(VecTy); 1868 for (unsigned I = 0; I < EltNum; ++I) 1869 Rep = Builder.CreateInsertElement(Rep, Load, 1870 ConstantInt::get(I32Ty, I)); 1871 } else if (IsX86 && (Name.startswith("sse41.pmovsx") || 1872 Name.startswith("sse41.pmovzx") || 1873 Name.startswith("avx2.pmovsx") || 1874 Name.startswith("avx2.pmovzx") || 1875 Name.startswith("avx512.mask.pmovsx") || 1876 Name.startswith("avx512.mask.pmovzx"))) { 1877 VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType()); 1878 VectorType *DstTy = cast<VectorType>(CI->getType()); 1879 unsigned NumDstElts = DstTy->getNumElements(); 1880 1881 // Extract a subvector of the first NumDstElts lanes and sign/zero extend. 1882 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 1883 for (unsigned i = 0; i != NumDstElts; ++i) 1884 ShuffleMask[i] = i; 1885 1886 Value *SV = Builder.CreateShuffleVector( 1887 CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask); 1888 1889 bool DoSext = (StringRef::npos != Name.find("pmovsx")); 1890 Rep = DoSext ? Builder.CreateSExt(SV, DstTy) 1891 : Builder.CreateZExt(SV, DstTy); 1892 // If there are 3 arguments, it's a masked intrinsic so we need a select. 1893 if (CI->getNumArgOperands() == 3) 1894 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1895 CI->getArgOperand(1)); 1896 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") || 1897 Name == "avx2.vbroadcasti128")) { 1898 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle. 1899 Type *EltTy = CI->getType()->getVectorElementType(); 1900 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits(); 1901 Type *VT = VectorType::get(EltTy, NumSrcElts); 1902 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0), 1903 PointerType::getUnqual(VT)); 1904 Value *Load = Builder.CreateAlignedLoad(Op, 1); 1905 if (NumSrcElts == 2) 1906 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 1907 { 0, 1, 0, 1 }); 1908 else 1909 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 1910 { 0, 1, 2, 3, 0, 1, 2, 3 }); 1911 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") || 1912 Name.startswith("avx512.mask.shuf.f"))) { 1913 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1914 Type *VT = CI->getType(); 1915 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128; 1916 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits(); 1917 unsigned ControlBitsMask = NumLanes - 1; 1918 unsigned NumControlBits = NumLanes / 2; 1919 SmallVector<uint32_t, 8> ShuffleMask(0); 1920 1921 for (unsigned l = 0; l != NumLanes; ++l) { 1922 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask; 1923 // We actually need the other source. 
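// Shuffle mask indices >= NumElts refer to the second shufflevector operand,
// so lanes taken from the other source are reached by offsetting the lane
// index by NumLanes (which becomes NumElts once scaled by NumElementsInLane).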
1924 if (l >= NumLanes / 2) 1925 LaneMask += NumLanes; 1926 for (unsigned i = 0; i != NumElementsInLane; ++i) 1927 ShuffleMask.push_back(LaneMask * NumElementsInLane + i); 1928 } 1929 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 1930 CI->getArgOperand(1), ShuffleMask); 1931 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 1932 CI->getArgOperand(3)); 1933 }else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") || 1934 Name.startswith("avx512.mask.broadcasti"))) { 1935 unsigned NumSrcElts = 1936 CI->getArgOperand(0)->getType()->getVectorNumElements(); 1937 unsigned NumDstElts = CI->getType()->getVectorNumElements(); 1938 1939 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 1940 for (unsigned i = 0; i != NumDstElts; ++i) 1941 ShuffleMask[i] = i % NumSrcElts; 1942 1943 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 1944 CI->getArgOperand(0), 1945 ShuffleMask); 1946 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1947 CI->getArgOperand(1)); 1948 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") || 1949 Name.startswith("avx2.vbroadcast") || 1950 Name.startswith("avx512.pbroadcast") || 1951 Name.startswith("avx512.mask.broadcast.s"))) { 1952 // Replace vp?broadcasts with a vector shuffle. 1953 Value *Op = CI->getArgOperand(0); 1954 unsigned NumElts = CI->getType()->getVectorNumElements(); 1955 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts); 1956 Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()), 1957 Constant::getNullValue(MaskTy)); 1958 1959 if (CI->getNumArgOperands() == 3) 1960 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1961 CI->getArgOperand(1)); 1962 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) { 1963 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 1964 CI->getArgOperand(1), 1965 CI->getArgOperand(2), 1966 CI->getArgOperand(3), 1967 CI->getArgOperand(4), 1968 false); 1969 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) { 1970 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 1971 CI->getArgOperand(1), 1972 CI->getArgOperand(2), 1973 CI->getArgOperand(3), 1974 CI->getArgOperand(4), 1975 true); 1976 } else if (IsX86 && (Name == "sse2.psll.dq" || 1977 Name == "avx2.psll.dq")) { 1978 // 128/256-bit shift left specified in bits. 1979 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1980 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), 1981 Shift / 8); // Shift is in bits. 1982 } else if (IsX86 && (Name == "sse2.psrl.dq" || 1983 Name == "avx2.psrl.dq")) { 1984 // 128/256-bit shift right specified in bits. 1985 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1986 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), 1987 Shift / 8); // Shift is in bits. 1988 } else if (IsX86 && (Name == "sse2.psll.dq.bs" || 1989 Name == "avx2.psll.dq.bs" || 1990 Name == "avx512.psll.dq.512")) { 1991 // 128/256/512-bit shift left specified in bytes. 1992 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1993 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift); 1994 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" || 1995 Name == "avx2.psrl.dq.bs" || 1996 Name == "avx512.psrl.dq.512")) { 1997 // 128/256/512-bit shift right specified in bytes. 
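// Unlike the bit-count variants above, these legacy byte-shift forms already
// carry a byte count, so the immediate is forwarded to
// UpgradeX86PSRLDQIntrinsics unchanged.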
1998 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 1999 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift); 2000 } else if (IsX86 && (Name == "sse41.pblendw" || 2001 Name.startswith("sse41.blendp") || 2002 Name.startswith("avx.blend.p") || 2003 Name == "avx2.pblendw" || 2004 Name.startswith("avx2.pblendd."))) { 2005 Value *Op0 = CI->getArgOperand(0); 2006 Value *Op1 = CI->getArgOperand(1); 2007 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2008 VectorType *VecTy = cast<VectorType>(CI->getType()); 2009 unsigned NumElts = VecTy->getNumElements(); 2010 2011 SmallVector<uint32_t, 16> Idxs(NumElts); 2012 for (unsigned i = 0; i != NumElts; ++i) 2013 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i; 2014 2015 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2016 } else if (IsX86 && (Name.startswith("avx.vinsertf128.") || 2017 Name == "avx2.vinserti128" || 2018 Name.startswith("avx512.mask.insert"))) { 2019 Value *Op0 = CI->getArgOperand(0); 2020 Value *Op1 = CI->getArgOperand(1); 2021 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2022 unsigned DstNumElts = CI->getType()->getVectorNumElements(); 2023 unsigned SrcNumElts = Op1->getType()->getVectorNumElements(); 2024 unsigned Scale = DstNumElts / SrcNumElts; 2025 2026 // Mask off the high bits of the immediate value; hardware ignores those. 2027 Imm = Imm % Scale; 2028 2029 // Extend the second operand into a vector the size of the destination. 2030 Value *UndefV = UndefValue::get(Op1->getType()); 2031 SmallVector<uint32_t, 8> Idxs(DstNumElts); 2032 for (unsigned i = 0; i != SrcNumElts; ++i) 2033 Idxs[i] = i; 2034 for (unsigned i = SrcNumElts; i != DstNumElts; ++i) 2035 Idxs[i] = SrcNumElts; 2036 Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs); 2037 2038 // Insert the second operand into the first operand. 2039 2040 // Note that there is no guarantee that instruction lowering will actually 2041 // produce a vinsertf128 instruction for the created shuffles. In 2042 // particular, the 0 immediate case involves no lane changes, so it can 2043 // be handled as a blend. 2044 2045 // Example of shuffle mask for 32-bit elements: 2046 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11> 2047 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 > 2048 2049 // First fill with identity mask. 2050 for (unsigned i = 0; i != DstNumElts; ++i) 2051 Idxs[i] = i; 2052 // Then replace the elements where we need to insert. 2053 for (unsigned i = 0; i != SrcNumElts; ++i) 2054 Idxs[i + Imm * SrcNumElts] = i + DstNumElts; 2055 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs); 2056 2057 // If the intrinsic has a mask operand, handle that. 2058 if (CI->getNumArgOperands() == 5) 2059 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2060 CI->getArgOperand(3)); 2061 } else if (IsX86 && (Name.startswith("avx.vextractf128.") || 2062 Name == "avx2.vextracti128" || 2063 Name.startswith("avx512.mask.vextract"))) { 2064 Value *Op0 = CI->getArgOperand(0); 2065 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2066 unsigned DstNumElts = CI->getType()->getVectorNumElements(); 2067 unsigned SrcNumElts = Op0->getType()->getVectorNumElements(); 2068 unsigned Scale = SrcNumElts / DstNumElts; 2069 2070 // Mask off the high bits of the immediate value; hardware ignores those. 2071 Imm = Imm % Scale; 2072 2073 // Get indexes for the subvector of the input vector.
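// Example of shuffle mask for 32-bit elements extracting <4 x i32> from
// <8 x i32>:
//  Imm = 1  <i32 4, i32 5, i32 6, i32 7>
//  Imm = 0  <i32 0, i32 1, i32 2, i32 3>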
2074 SmallVector<uint32_t, 8> Idxs(DstNumElts); 2075 for (unsigned i = 0; i != DstNumElts; ++i) { 2076 Idxs[i] = i + (Imm * DstNumElts); 2077 } 2078 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2079 2080 // If the intrinsic has a mask operand, handle that. 2081 if (CI->getNumArgOperands() == 4) 2082 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2083 CI->getArgOperand(2)); 2084 } else if (!IsX86 && Name == "stackprotectorcheck") { 2085 Rep = nullptr; 2086 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") || 2087 Name.startswith("avx512.mask.perm.di."))) { 2088 Value *Op0 = CI->getArgOperand(0); 2089 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2090 VectorType *VecTy = cast<VectorType>(CI->getType()); 2091 unsigned NumElts = VecTy->getNumElements(); 2092 2093 SmallVector<uint32_t, 8> Idxs(NumElts); 2094 for (unsigned i = 0; i != NumElts; ++i) 2095 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3); 2096 2097 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2098 2099 if (CI->getNumArgOperands() == 4) 2100 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2101 CI->getArgOperand(2)); 2102 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") || 2103 Name == "avx2.vperm2i128")) { 2104 // The immediate permute control byte looks like this: 2105 // [1:0] - select 128 bits from sources for low half of destination 2106 // [2] - ignore 2107 // [3] - zero low half of destination 2108 // [5:4] - select 128 bits from sources for high half of destination 2109 // [6] - ignore 2110 // [7] - zero high half of destination 2111 2112 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2113 2114 unsigned NumElts = CI->getType()->getVectorNumElements(); 2115 unsigned HalfSize = NumElts / 2; 2116 SmallVector<uint32_t, 8> ShuffleMask(NumElts); 2117 2118 // Determine which operand(s) are actually in use for this instruction. 2119 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2120 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2121 2122 // If needed, replace operands based on zero mask. 2123 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0; 2124 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1; 2125 2126 // Permute low half of result. 2127 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0; 2128 for (unsigned i = 0; i < HalfSize; ++i) 2129 ShuffleMask[i] = StartIndex + i; 2130 2131 // Permute high half of result. 2132 StartIndex = (Imm & 0x10) ? HalfSize : 0; 2133 for (unsigned i = 0; i < HalfSize; ++i) 2134 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i; 2135 2136 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2137 2138 } else if (IsX86 && (Name.startswith("avx.vpermil.") || 2139 Name == "sse2.pshuf.d" || 2140 Name.startswith("avx512.mask.vpermil.p") || 2141 Name.startswith("avx512.mask.pshuf.d."))) { 2142 Value *Op0 = CI->getArgOperand(0); 2143 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2144 VectorType *VecTy = cast<VectorType>(CI->getType()); 2145 unsigned NumElts = VecTy->getNumElements(); 2146 // Calculate the size of each index in the immediate. 2147 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits(); 2148 unsigned IdxMask = ((1 << IdxSize) - 1); 2149 2150 SmallVector<uint32_t, 8> Idxs(NumElts); 2151 // Lookup the bits for this element, wrapping around the immediate every 2152 // 8-bits. 
Elements are grouped into sets of 2 or 4 elements so we need 2153 // to offset by the first index of each group. 2154 for (unsigned i = 0; i != NumElts; ++i) 2155 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask); 2156 2157 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2158 2159 if (CI->getNumArgOperands() == 4) 2160 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2161 CI->getArgOperand(2)); 2162 } else if (IsX86 && (Name == "sse2.pshufl.w" || 2163 Name.startswith("avx512.mask.pshufl.w."))) { 2164 Value *Op0 = CI->getArgOperand(0); 2165 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2166 unsigned NumElts = CI->getType()->getVectorNumElements(); 2167 2168 SmallVector<uint32_t, 16> Idxs(NumElts); 2169 for (unsigned l = 0; l != NumElts; l += 8) { 2170 for (unsigned i = 0; i != 4; ++i) 2171 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l; 2172 for (unsigned i = 4; i != 8; ++i) 2173 Idxs[i + l] = i + l; 2174 } 2175 2176 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2177 2178 if (CI->getNumArgOperands() == 4) 2179 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2180 CI->getArgOperand(2)); 2181 } else if (IsX86 && (Name == "sse2.pshufh.w" || 2182 Name.startswith("avx512.mask.pshufh.w."))) { 2183 Value *Op0 = CI->getArgOperand(0); 2184 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2185 unsigned NumElts = CI->getType()->getVectorNumElements(); 2186 2187 SmallVector<uint32_t, 16> Idxs(NumElts); 2188 for (unsigned l = 0; l != NumElts; l += 8) { 2189 for (unsigned i = 0; i != 4; ++i) 2190 Idxs[i + l] = i + l; 2191 for (unsigned i = 0; i != 4; ++i) 2192 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l; 2193 } 2194 2195 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2196 2197 if (CI->getNumArgOperands() == 4) 2198 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2199 CI->getArgOperand(2)); 2200 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) { 2201 Value *Op0 = CI->getArgOperand(0); 2202 Value *Op1 = CI->getArgOperand(1); 2203 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2204 unsigned NumElts = CI->getType()->getVectorNumElements(); 2205 2206 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2207 unsigned HalfLaneElts = NumLaneElts / 2; 2208 2209 SmallVector<uint32_t, 16> Idxs(NumElts); 2210 for (unsigned i = 0; i != NumElts; ++i) { 2211 // Base index is the starting element of the lane. 2212 Idxs[i] = i - (i % NumLaneElts); 2213 // If we are half way through the lane switch to the other source. 2214 if ((i % NumLaneElts) >= HalfLaneElts) 2215 Idxs[i] += NumElts; 2216 // Now select the specific element. By adding HalfLaneElts bits from 2217 // the immediate. Wrapping around the immediate every 8-bits. 
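// E.g. a 128-bit shufps with Imm = 0x1B yields the mask
// <i32 3, i32 2, i32 5, i32 4>: the low half is selected from Op0 and the
// high half from Op1 by successive 2-bit fields of the immediate.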
2218 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1); 2219 } 2220 2221 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2222 2223 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2224 CI->getArgOperand(3)); 2225 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") || 2226 Name.startswith("avx512.mask.movshdup") || 2227 Name.startswith("avx512.mask.movsldup"))) { 2228 Value *Op0 = CI->getArgOperand(0); 2229 unsigned NumElts = CI->getType()->getVectorNumElements(); 2230 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2231 2232 unsigned Offset = 0; 2233 if (Name.startswith("avx512.mask.movshdup.")) 2234 Offset = 1; 2235 2236 SmallVector<uint32_t, 16> Idxs(NumElts); 2237 for (unsigned l = 0; l != NumElts; l += NumLaneElts) 2238 for (unsigned i = 0; i != NumLaneElts; i += 2) { 2239 Idxs[i + l + 0] = i + l + Offset; 2240 Idxs[i + l + 1] = i + l + Offset; 2241 } 2242 2243 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2244 2245 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2246 CI->getArgOperand(1)); 2247 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") || 2248 Name.startswith("avx512.mask.unpckl."))) { 2249 Value *Op0 = CI->getArgOperand(0); 2250 Value *Op1 = CI->getArgOperand(1); 2251 int NumElts = CI->getType()->getVectorNumElements(); 2252 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2253 2254 SmallVector<uint32_t, 64> Idxs(NumElts); 2255 for (int l = 0; l != NumElts; l += NumLaneElts) 2256 for (int i = 0; i != NumLaneElts; ++i) 2257 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2); 2258 2259 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2260 2261 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2262 CI->getArgOperand(2)); 2263 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") || 2264 Name.startswith("avx512.mask.unpckh."))) { 2265 Value *Op0 = CI->getArgOperand(0); 2266 Value *Op1 = CI->getArgOperand(1); 2267 int NumElts = CI->getType()->getVectorNumElements(); 2268 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2269 2270 SmallVector<uint32_t, 64> Idxs(NumElts); 2271 for (int l = 0; l != NumElts; l += NumLaneElts) 2272 for (int i = 0; i != NumLaneElts; ++i) 2273 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2); 2274 2275 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2276 2277 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2278 CI->getArgOperand(2)); 2279 } else if (IsX86 && Name.startswith("avx512.mask.pand.")) { 2280 Rep = Builder.CreateAnd(CI->getArgOperand(0), CI->getArgOperand(1)); 2281 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2282 CI->getArgOperand(2)); 2283 } else if (IsX86 && Name.startswith("avx512.mask.pandn.")) { 2284 Rep = Builder.CreateAnd(Builder.CreateNot(CI->getArgOperand(0)), 2285 CI->getArgOperand(1)); 2286 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2287 CI->getArgOperand(2)); 2288 } else if (IsX86 && Name.startswith("avx512.mask.por.")) { 2289 Rep = Builder.CreateOr(CI->getArgOperand(0), CI->getArgOperand(1)); 2290 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2291 CI->getArgOperand(2)); 2292 } else if (IsX86 && Name.startswith("avx512.mask.pxor.")) { 2293 Rep = Builder.CreateXor(CI->getArgOperand(0), CI->getArgOperand(1)); 2294 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2295 CI->getArgOperand(2)); 2296 } else if (IsX86 && Name.startswith("avx512.mask.and.")) { 2297 VectorType *FTy = cast<VectorType>(CI->getType()); 2298 VectorType *ITy = 
VectorType::getInteger(FTy); 2299 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2300 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2301 Rep = Builder.CreateBitCast(Rep, FTy); 2302 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2303 CI->getArgOperand(2)); 2304 } else if (IsX86 && Name.startswith("avx512.mask.andn.")) { 2305 VectorType *FTy = cast<VectorType>(CI->getType()); 2306 VectorType *ITy = VectorType::getInteger(FTy); 2307 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy)); 2308 Rep = Builder.CreateAnd(Rep, 2309 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2310 Rep = Builder.CreateBitCast(Rep, FTy); 2311 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2312 CI->getArgOperand(2)); 2313 } else if (IsX86 && Name.startswith("avx512.mask.or.")) { 2314 VectorType *FTy = cast<VectorType>(CI->getType()); 2315 VectorType *ITy = VectorType::getInteger(FTy); 2316 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2317 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2318 Rep = Builder.CreateBitCast(Rep, FTy); 2319 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2320 CI->getArgOperand(2)); 2321 } else if (IsX86 && Name.startswith("avx512.mask.xor.")) { 2322 VectorType *FTy = cast<VectorType>(CI->getType()); 2323 VectorType *ITy = VectorType::getInteger(FTy); 2324 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2325 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2326 Rep = Builder.CreateBitCast(Rep, FTy); 2327 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2328 CI->getArgOperand(2)); 2329 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) { 2330 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2331 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2332 CI->getArgOperand(2)); 2333 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) { 2334 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2335 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2336 CI->getArgOperand(2)); 2337 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) { 2338 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2339 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2340 CI->getArgOperand(2)); 2341 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) { 2342 if (Name.endswith(".512")) { 2343 Intrinsic::ID IID; 2344 if (Name[17] == 's') 2345 IID = Intrinsic::x86_avx512_add_ps_512; 2346 else 2347 IID = Intrinsic::x86_avx512_add_pd_512; 2348 2349 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2350 { CI->getArgOperand(0), CI->getArgOperand(1), 2351 CI->getArgOperand(4) }); 2352 } else { 2353 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2354 } 2355 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2356 CI->getArgOperand(2)); 2357 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) { 2358 if (Name.endswith(".512")) { 2359 Intrinsic::ID IID; 2360 if (Name[17] == 's') 2361 IID = Intrinsic::x86_avx512_div_ps_512; 2362 else 2363 IID = Intrinsic::x86_avx512_div_pd_512; 2364 2365 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2366 { CI->getArgOperand(0), CI->getArgOperand(1), 2367 CI->getArgOperand(4) }); 2368 } else { 2369 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1)); 2370 } 2371 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2372 CI->getArgOperand(2)); 2373 } else if (IsX86 
&& Name.startswith("avx512.mask.mul.p")) { 2374 if (Name.endswith(".512")) { 2375 Intrinsic::ID IID; 2376 if (Name[17] == 's') 2377 IID = Intrinsic::x86_avx512_mul_ps_512; 2378 else 2379 IID = Intrinsic::x86_avx512_mul_pd_512; 2380 2381 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2382 { CI->getArgOperand(0), CI->getArgOperand(1), 2383 CI->getArgOperand(4) }); 2384 } else { 2385 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2386 } 2387 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2388 CI->getArgOperand(2)); 2389 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) { 2390 if (Name.endswith(".512")) { 2391 Intrinsic::ID IID; 2392 if (Name[17] == 's') 2393 IID = Intrinsic::x86_avx512_sub_ps_512; 2394 else 2395 IID = Intrinsic::x86_avx512_sub_pd_512; 2396 2397 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2398 { CI->getArgOperand(0), CI->getArgOperand(1), 2399 CI->getArgOperand(4) }); 2400 } else { 2401 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2402 } 2403 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2404 CI->getArgOperand(2)); 2405 } else if (IsX86 && Name.startswith("avx512.mask.max.p") && 2406 Name.drop_front(18) == ".512") { 2407 Intrinsic::ID IID; 2408 if (Name[17] == 's') 2409 IID = Intrinsic::x86_avx512_max_ps_512; 2410 else 2411 IID = Intrinsic::x86_avx512_max_pd_512; 2412 2413 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2414 { CI->getArgOperand(0), CI->getArgOperand(1), 2415 CI->getArgOperand(4) }); 2416 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2417 CI->getArgOperand(2)); 2418 } else if (IsX86 && Name.startswith("avx512.mask.min.p") && 2419 Name.drop_front(18) == ".512") { 2420 Intrinsic::ID IID; 2421 if (Name[17] == 's') 2422 IID = Intrinsic::x86_avx512_min_ps_512; 2423 else 2424 IID = Intrinsic::x86_avx512_min_pd_512; 2425 2426 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2427 { CI->getArgOperand(0), CI->getArgOperand(1), 2428 CI->getArgOperand(4) }); 2429 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2430 CI->getArgOperand(2)); 2431 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) { 2432 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 2433 Intrinsic::ctlz, 2434 CI->getType()), 2435 { CI->getArgOperand(0), Builder.getInt1(false) }); 2436 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2437 CI->getArgOperand(1)); 2438 } else if (IsX86 && Name.startswith("avx512.mask.psll")) { 2439 bool IsImmediate = Name[16] == 'i' || 2440 (Name.size() > 18 && Name[18] == 'i'); 2441 bool IsVariable = Name[16] == 'v'; 2442 char Size = Name[16] == '.' ? Name[17] : 2443 Name[17] == '.' ? Name[18] : 2444 Name[18] == '.' ? 
Name[19] : 2445 Name[20]; 2446 2447 Intrinsic::ID IID; 2448 if (IsVariable && Name[17] != '.') { 2449 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di 2450 IID = Intrinsic::x86_avx2_psllv_q; 2451 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di 2452 IID = Intrinsic::x86_avx2_psllv_q_256; 2453 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si 2454 IID = Intrinsic::x86_avx2_psllv_d; 2455 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si 2456 IID = Intrinsic::x86_avx2_psllv_d_256; 2457 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi 2458 IID = Intrinsic::x86_avx512_psllv_w_128; 2459 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi 2460 IID = Intrinsic::x86_avx512_psllv_w_256; 2461 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi 2462 IID = Intrinsic::x86_avx512_psllv_w_512; 2463 else 2464 llvm_unreachable("Unexpected size"); 2465 } else if (Name.endswith(".128")) { 2466 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128 2467 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d 2468 : Intrinsic::x86_sse2_psll_d; 2469 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128 2470 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q 2471 : Intrinsic::x86_sse2_psll_q; 2472 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128 2473 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w 2474 : Intrinsic::x86_sse2_psll_w; 2475 else 2476 llvm_unreachable("Unexpected size"); 2477 } else if (Name.endswith(".256")) { 2478 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256 2479 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d 2480 : Intrinsic::x86_avx2_psll_d; 2481 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256 2482 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q 2483 : Intrinsic::x86_avx2_psll_q; 2484 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256 2485 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w 2486 : Intrinsic::x86_avx2_psll_w; 2487 else 2488 llvm_unreachable("Unexpected size"); 2489 } else { 2490 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512 2491 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 : 2492 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 : 2493 Intrinsic::x86_avx512_psll_d_512; 2494 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512 2495 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 : 2496 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 : 2497 Intrinsic::x86_avx512_psll_q_512; 2498 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w 2499 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512 2500 : Intrinsic::x86_avx512_psll_w_512; 2501 else 2502 llvm_unreachable("Unexpected size"); 2503 } 2504 2505 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2506 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) { 2507 bool IsImmediate = Name[16] == 'i' || 2508 (Name.size() > 18 && Name[18] == 'i'); 2509 bool IsVariable = Name[16] == 'v'; 2510 char Size = Name[16] == '.' ? Name[17] : 2511 Name[17] == '.' ? Name[18] : 2512 Name[18] == '.' ? 
Name[19] : 2513 Name[20]; 2514 2515 Intrinsic::ID IID; 2516 if (IsVariable && Name[17] != '.') { 2517 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di 2518 IID = Intrinsic::x86_avx2_psrlv_q; 2519 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di 2520 IID = Intrinsic::x86_avx2_psrlv_q_256; 2521 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si 2522 IID = Intrinsic::x86_avx2_psrlv_d; 2523 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si 2524 IID = Intrinsic::x86_avx2_psrlv_d_256; 2525 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi 2526 IID = Intrinsic::x86_avx512_psrlv_w_128; 2527 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi 2528 IID = Intrinsic::x86_avx512_psrlv_w_256; 2529 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi 2530 IID = Intrinsic::x86_avx512_psrlv_w_512; 2531 else 2532 llvm_unreachable("Unexpected size"); 2533 } else if (Name.endswith(".128")) { 2534 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128 2535 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d 2536 : Intrinsic::x86_sse2_psrl_d; 2537 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128 2538 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q 2539 : Intrinsic::x86_sse2_psrl_q; 2540 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128 2541 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w 2542 : Intrinsic::x86_sse2_psrl_w; 2543 else 2544 llvm_unreachable("Unexpected size"); 2545 } else if (Name.endswith(".256")) { 2546 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256 2547 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d 2548 : Intrinsic::x86_avx2_psrl_d; 2549 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256 2550 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q 2551 : Intrinsic::x86_avx2_psrl_q; 2552 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256 2553 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w 2554 : Intrinsic::x86_avx2_psrl_w; 2555 else 2556 llvm_unreachable("Unexpected size"); 2557 } else { 2558 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512 2559 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 : 2560 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 : 2561 Intrinsic::x86_avx512_psrl_d_512; 2562 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512 2563 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 : 2564 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 : 2565 Intrinsic::x86_avx512_psrl_q_512; 2566 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w) 2567 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512 2568 : Intrinsic::x86_avx512_psrl_w_512; 2569 else 2570 llvm_unreachable("Unexpected size"); 2571 } 2572 2573 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2574 } else if (IsX86 && Name.startswith("avx512.mask.psra")) { 2575 bool IsImmediate = Name[16] == 'i' || 2576 (Name.size() > 18 && Name[18] == 'i'); 2577 bool IsVariable = Name[16] == 'v'; 2578 char Size = Name[16] == '.' ? Name[17] : 2579 Name[17] == '.' ? Name[18] : 2580 Name[18] == '.' ? 
Name[19] : 2581 Name[20]; 2582 2583 Intrinsic::ID IID; 2584 if (IsVariable && Name[17] != '.') { 2585 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si 2586 IID = Intrinsic::x86_avx2_psrav_d; 2587 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si 2588 IID = Intrinsic::x86_avx2_psrav_d_256; 2589 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi 2590 IID = Intrinsic::x86_avx512_psrav_w_128; 2591 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi 2592 IID = Intrinsic::x86_avx512_psrav_w_256; 2593 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi 2594 IID = Intrinsic::x86_avx512_psrav_w_512; 2595 else 2596 llvm_unreachable("Unexpected size"); 2597 } else if (Name.endswith(".128")) { 2598 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128 2599 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d 2600 : Intrinsic::x86_sse2_psra_d; 2601 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128 2602 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 : 2603 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 : 2604 Intrinsic::x86_avx512_psra_q_128; 2605 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128 2606 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w 2607 : Intrinsic::x86_sse2_psra_w; 2608 else 2609 llvm_unreachable("Unexpected size"); 2610 } else if (Name.endswith(".256")) { 2611 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256 2612 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d 2613 : Intrinsic::x86_avx2_psra_d; 2614 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256 2615 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 : 2616 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 : 2617 Intrinsic::x86_avx512_psra_q_256; 2618 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256 2619 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w 2620 : Intrinsic::x86_avx2_psra_w; 2621 else 2622 llvm_unreachable("Unexpected size"); 2623 } else { 2624 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512 2625 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 : 2626 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 : 2627 Intrinsic::x86_avx512_psra_d_512; 2628 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q 2629 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 : 2630 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 : 2631 Intrinsic::x86_avx512_psra_q_512; 2632 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w 2633 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512 2634 : Intrinsic::x86_avx512_psra_w_512; 2635 else 2636 llvm_unreachable("Unexpected size"); 2637 } 2638 2639 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2640 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) { 2641 Rep = upgradeMaskedMove(Builder, *CI); 2642 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) { 2643 Rep = UpgradeMaskToInt(Builder, *CI); 2644 } else if (IsX86 && Name.endswith(".movntdqa")) { 2645 Module *M = F->getParent(); 2646 MDNode *Node = MDNode::get( 2647 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 2648 2649 Value *Ptr = CI->getArgOperand(0); 2650 VectorType *VTy = cast<VectorType>(CI->getType()); 2651 2652 // Convert the type of the pointer to a pointer to the stored type. 
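// The nontemporal hint of movntdqa is preserved by tagging the ordinary
// aligned load below with !nontemporal metadata instead of keeping the
// target-specific intrinsic.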
2653 Value *BC = 2654 Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast"); 2655 LoadInst *LI = Builder.CreateAlignedLoad(BC, VTy->getBitWidth() / 8); 2656 LI->setMetadata(M->getMDKindID("nontemporal"), Node); 2657 Rep = LI; 2658 } else if (IsX86 && 2659 (Name.startswith("sse2.pavg") || Name.startswith("avx2.pavg") || 2660 Name.startswith("avx512.mask.pavg"))) { 2661 // llvm.x86.sse2.pavg.b/w, llvm.x86.avx2.pavg.b/w, 2662 // llvm.x86.avx512.mask.pavg.b/w 2663 Value *A = CI->getArgOperand(0); 2664 Value *B = CI->getArgOperand(1); 2665 VectorType *ZextType = VectorType::getExtendedElementVectorType( 2666 cast<VectorType>(A->getType())); 2667 Value *ExtendedA = Builder.CreateZExt(A, ZextType); 2668 Value *ExtendedB = Builder.CreateZExt(B, ZextType); 2669 Value *Sum = Builder.CreateAdd(ExtendedA, ExtendedB); 2670 Value *AddOne = Builder.CreateAdd(Sum, ConstantInt::get(ZextType, 1)); 2671 Value *ShiftR = Builder.CreateLShr(AddOne, ConstantInt::get(ZextType, 1)); 2672 Rep = Builder.CreateTrunc(ShiftR, A->getType()); 2673 if (CI->getNumArgOperands() > 2) { 2674 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2675 CI->getArgOperand(2)); 2676 } 2677 } else if (IsX86 && Name.startswith("fma.vfmsub")) { 2678 // Handle FMSUB and FSUBADD. 2679 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 2680 unsigned EltWidth = CI->getType()->getScalarSizeInBits(); 2681 Intrinsic::ID IID; 2682 if (Name[10] == '.' && Name[11] == 'p') { 2683 // Packed FMSUB 2684 if (VecWidth == 128 && EltWidth == 32) 2685 IID = Intrinsic::x86_fma_vfmadd_ps; 2686 else if (VecWidth == 128 && EltWidth == 64) 2687 IID = Intrinsic::x86_fma_vfmadd_pd; 2688 else if (VecWidth == 256 && EltWidth == 32) 2689 IID = Intrinsic::x86_fma_vfmadd_ps_256; 2690 else if (VecWidth == 256 && EltWidth == 64) 2691 IID = Intrinsic::x86_fma_vfmadd_pd_256; 2692 else 2693 llvm_unreachable("Unexpected intrinsic"); 2694 } else if (Name[10] == '.' 
                 && Name[11] == 's') {
        // Scalar FMSUB
        if (EltWidth == 32)
          IID = Intrinsic::x86_fma_vfmadd_ss;
        else if (EltWidth == 64)
          IID = Intrinsic::x86_fma_vfmadd_sd;
        else
          llvm_unreachable("Unexpected intrinsic");
      } else {
        // FMSUBADD
        if (VecWidth == 128 && EltWidth == 32)
          IID = Intrinsic::x86_fma_vfmaddsub_ps;
        else if (VecWidth == 128 && EltWidth == 64)
          IID = Intrinsic::x86_fma_vfmaddsub_pd;
        else if (VecWidth == 256 && EltWidth == 32)
          IID = Intrinsic::x86_fma_vfmaddsub_ps_256;
        else if (VecWidth == 256 && EltWidth == 64)
          IID = Intrinsic::x86_fma_vfmaddsub_pd_256;
        else
          llvm_unreachable("Unexpected intrinsic");
      }
      Value *Arg2 = Builder.CreateFNeg(CI->getArgOperand(2));
      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), Arg2 };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Ops);
    } else if (IsX86 && (Name.startswith("fma.vfnmadd.") ||
                         Name.startswith("fma.vfnmsub."))) {
      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);
      Value *Arg2 = CI->getArgOperand(2);
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      unsigned EltWidth = CI->getType()->getScalarSizeInBits();
      Intrinsic::ID IID;
      if (Name[12] == 'p') {
        // Packed FNMADD/FNMSUB
        Arg0 = Builder.CreateFNeg(Arg0);
        if (VecWidth == 128 && EltWidth == 32)
          IID = Intrinsic::x86_fma_vfmadd_ps;
        else if (VecWidth == 128 && EltWidth == 64)
          IID = Intrinsic::x86_fma_vfmadd_pd;
        else if (VecWidth == 256 && EltWidth == 32)
          IID = Intrinsic::x86_fma_vfmadd_ps_256;
        else if (VecWidth == 256 && EltWidth == 64)
          IID = Intrinsic::x86_fma_vfmadd_pd_256;
        else
          llvm_unreachable("Unexpected intrinsic");
      } else {
        // Scalar FNMADD/FNMSUB
        Arg1 = Builder.CreateFNeg(Arg1); // Arg0 is passthru so invert Arg1.
        if (EltWidth == 32)
          IID = Intrinsic::x86_fma_vfmadd_ss;
        else if (EltWidth == 64)
          IID = Intrinsic::x86_fma_vfmadd_sd;
        else
          llvm_unreachable("Unexpected intrinsic");
      }
      // Invert for FNMSUB.
      if (Name[8] == 's')
        Arg2 = Builder.CreateFNeg(Arg2);
      Value *Ops[] = { Arg0, Arg1, Arg2 };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Ops);
    } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
                         Name.startswith("avx512.maskz.pternlog."))) {
      bool ZeroMask = Name[11] == 'z';
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      unsigned EltWidth = CI->getType()->getScalarSizeInBits();
      Intrinsic::ID IID;
      if (VecWidth == 128 && EltWidth == 32)
        IID = Intrinsic::x86_avx512_pternlog_d_128;
      else if (VecWidth == 256 && EltWidth == 32)
        IID = Intrinsic::x86_avx512_pternlog_d_256;
      else if (VecWidth == 512 && EltWidth == 32)
        IID = Intrinsic::x86_avx512_pternlog_d_512;
      else if (VecWidth == 128 && EltWidth == 64)
        IID = Intrinsic::x86_avx512_pternlog_q_128;
      else if (VecWidth == 256 && EltWidth == 64)
        IID = Intrinsic::x86_avx512_pternlog_q_256;
      else if (VecWidth == 512 && EltWidth == 64)
        IID = Intrinsic::x86_avx512_pternlog_q_512;
      else
        llvm_unreachable("Unexpected intrinsic");

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2), CI->getArgOperand(3) };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Args);
      Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                 : CI->getArgOperand(0);
      Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
    } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") ||
                         Name.startswith("avx512.maskz.vpmadd52"))) {
      bool ZeroMask = Name[11] == 'z';
      bool High = Name[20] == 'h' || Name[21] == 'h';
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      Intrinsic::ID IID;
      if (VecWidth == 128 && !High)
        IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
      else if (VecWidth == 256 && !High)
        IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
      else if (VecWidth == 512 && !High)
        IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
      else if (VecWidth == 128 && High)
        IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
      else if (VecWidth == 256 && High)
        IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
      else if (VecWidth == 512 && High)
        IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
      else
        llvm_unreachable("Unexpected intrinsic");

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2) };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Args);
      Value *PassThru = ZeroMask ?
                                   ConstantAggregateZero::get(CI->getType())
                                 : CI->getArgOperand(0);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
    } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") ||
                         Name.startswith("avx512.mask.vpermt2var.") ||
                         Name.startswith("avx512.maskz.vpermt2var."))) {
      bool ZeroMask = Name[11] == 'z';
      bool IndexForm = Name[17] == 'i';
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      unsigned EltWidth = CI->getType()->getScalarSizeInBits();
      bool IsFloat = CI->getType()->isFPOrFPVectorTy();
      Intrinsic::ID IID;
      if (VecWidth == 128 && EltWidth == 32 && IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
      else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_d_128;
      else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
      else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_q_128;
      else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
      else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_d_256;
      else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
      else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_q_256;
      else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
      else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_d_512;
      else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
      else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
        IID = Intrinsic::x86_avx512_vpermi2var_q_512;
      else if (VecWidth == 128 && EltWidth == 16)
        IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
      else if (VecWidth == 256 && EltWidth == 16)
        IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
      else if (VecWidth == 512 && EltWidth == 16)
        IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
      else if (VecWidth == 128 && EltWidth == 8)
        IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
      else if (VecWidth == 256 && EltWidth == 8)
        IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
      else if (VecWidth == 512 && EltWidth == 8)
        IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
      else
        llvm_unreachable("Unexpected intrinsic");

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2) };

      // If this isn't index form we need to swap operand 0 and 1.
      if (!IndexForm)
        std::swap(Args[0], Args[1]);

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Args);
      Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                 : Builder.CreateBitCast(CI->getArgOperand(1),
                                                         CI->getType());
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
    } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
                         Name.startswith("avx512.maskz.vpdpbusd.") ||
                         Name.startswith("avx512.mask.vpdpbusds.") ||
                         Name.startswith("avx512.maskz.vpdpbusds."))) {
      bool ZeroMask = Name[11] == 'z';
      bool IsSaturating = Name[ZeroMask ?
                               21 : 20] == 's';
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      Intrinsic::ID IID;
      if (VecWidth == 128 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusd_128;
      else if (VecWidth == 256 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusd_256;
      else if (VecWidth == 512 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusd_512;
      else if (VecWidth == 128 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusds_128;
      else if (VecWidth == 256 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusds_256;
      else if (VecWidth == 512 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpbusds_512;
      else
        llvm_unreachable("Unexpected intrinsic");

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2) };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Args);
      Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                 : CI->getArgOperand(0);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
    } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") ||
                         Name.startswith("avx512.maskz.vpdpwssd.") ||
                         Name.startswith("avx512.mask.vpdpwssds.") ||
                         Name.startswith("avx512.maskz.vpdpwssds."))) {
      bool ZeroMask = Name[11] == 'z';
      bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
      unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
      Intrinsic::ID IID;
      if (VecWidth == 128 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssd_128;
      else if (VecWidth == 256 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssd_256;
      else if (VecWidth == 512 && !IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssd_512;
      else if (VecWidth == 128 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssds_128;
      else if (VecWidth == 256 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssds_256;
      else if (VecWidth == 512 && IsSaturating)
        IID = Intrinsic::x86_avx512_vpdpwssds_512;
      else
        llvm_unreachable("Unexpected intrinsic");

      Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                        CI->getArgOperand(2) };
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                               Args);
      Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                                 : CI->getArgOperand(0);
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
    } else if (IsX86 && Name.startswith("avx512.mask.") &&
               upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
      // Rep will be updated by the call in the condition.
    } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
      Value *Arg = CI->getArgOperand(0);
      Value *Neg = Builder.CreateNeg(Arg, "neg");
      Value *Cmp = Builder.CreateICmpSGE(
          Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
      Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
    } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
                          Name == "max.ui" || Name == "max.ull")) {
      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);
      Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
                       ?
                         Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
                       : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
      Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
    } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
                          Name == "min.ui" || Name == "min.ull")) {
      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);
      Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
                       ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
                       : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
      Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
    } else if (IsNVVM && Name == "clz.ll") {
      // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
      Value *Arg = CI->getArgOperand(0);
      Value *Ctlz = Builder.CreateCall(
          Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                    {Arg->getType()}),
          {Arg, Builder.getFalse()}, "ctlz");
      Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
    } else if (IsNVVM && Name == "popc.ll") {
      // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an i64.
      Value *Arg = CI->getArgOperand(0);
      Value *Popc = Builder.CreateCall(
          Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                    {Arg->getType()}),
          Arg, "ctpop");
      Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
    } else if (IsNVVM && Name == "h2f") {
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(
                                   F->getParent(), Intrinsic::convert_from_fp16,
                                   {Builder.getFloatTy()}),
                               CI->getArgOperand(0), "h2f");
    } else {
      llvm_unreachable("Unknown function for CallInst upgrade.");
    }

    if (Rep)
      CI->replaceAllUsesWith(Rep);
    CI->eraseFromParent();
    return;
  }

  const auto &DefaultCase = [&NewFn, &CI]() -> void {
    // Handle generic mangling change, but nothing else.
    assert(
        (CI->getCalledFunction()->getName() != NewFn->getName()) &&
        "Unknown function for CallInst upgrade and isn't just a name change");
    CI->setCalledFunction(NewFn);
  };
  CallInst *NewCall = nullptr;
  switch (NewFn->getIntrinsicID()) {
  default: {
    DefaultCase();
    return;
  }

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());
    NewCall = Builder.CreateCall(NewFn, Args);
    break;
  }

  case Intrinsic::bitreverse:
    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
    break;

  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    assert(CI->getNumArgOperands() == 1 &&
           "Mismatch between function args and call args");
    NewCall =
        Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
    break;

  case Intrinsic::objectsize: {
    Value *NullIsUnknownSize = CI->getNumArgOperands() == 2
                                   ?
                                     Builder.getFalse()
                                   : CI->getArgOperand(2);
    NewCall = Builder.CreateCall(
        NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize});
    break;
  }

  case Intrinsic::ctpop:
    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
    break;

  case Intrinsic::convert_from_fp16:
    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
    break;

  case Intrinsic::dbg_value:
    // Upgrade from the old version that had an extra offset argument.
    assert(CI->getNumArgOperands() == 4);
    // Drop nonzero offsets instead of attempting to upgrade them.
    if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
      if (Offset->isZeroValue()) {
        NewCall = Builder.CreateCall(
            NewFn,
            {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
        break;
      }
    CI->eraseFromParent();
    return;

  case Intrinsic::x86_xop_vfrcz_ss:
  case Intrinsic::x86_xop_vfrcz_sd:
    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
    break;

  case Intrinsic::x86_xop_vpermil2pd:
  case Intrinsic::x86_xop_vpermil2ps:
  case Intrinsic::x86_xop_vpermil2pd_256:
  case Intrinsic::x86_xop_vpermil2ps_256: {
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());
    VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
    VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
    Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
    NewCall = Builder.CreateCall(NewFn, Args);
    break;
  }

  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestnzc: {
    // The arguments for these intrinsics used to be v4f32, and changed
    // to v2i64. This is purely a nop, since those are bitwise intrinsics.
    // So, the only thing required is a bitcast for both arguments.
    // First, check the arguments have the old type.
    Value *Arg0 = CI->getArgOperand(0);
    if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
      return;

    // Old intrinsic, add bitcasts
    Value *Arg1 = CI->getArgOperand(1);

    Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);

    Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
    Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");

    NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
    break;
  }

  case Intrinsic::x86_sse41_insertps:
  case Intrinsic::x86_sse41_dppd:
  case Intrinsic::x86_sse41_dpps:
  case Intrinsic::x86_sse41_mpsadbw:
  case Intrinsic::x86_avx_dp_ps_256:
  case Intrinsic::x86_avx2_mpsadbw: {
    // Need to truncate the last argument from i32 to i8 -- this argument models
    // an inherently 8-bit immediate operand to these x86 instructions.
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());

    // Replace the last argument with a trunc.
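    // Illustrative upgrade (hypothetical operands): a call such as
    //   @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %b, i32 17)
    // is rebuilt with the immediate narrowed to i8:
    //   @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %b, i8 17)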
    Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
    NewCall = Builder.CreateCall(NewFn, Args);
    break;
  }

  case Intrinsic::thread_pointer: {
    NewCall = Builder.CreateCall(NewFn, {});
    break;
  }

  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::masked_load:
  case Intrinsic::masked_store:
  case Intrinsic::masked_gather:
  case Intrinsic::masked_scatter: {
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());
    NewCall = Builder.CreateCall(NewFn, Args);
    break;
  }

  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset: {
    // We have to make sure that the call signature is what we're expecting.
    // We only want to change the old signatures by removing the alignment arg:
    //  @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i32, i1)
    //    -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|64], i1)
    //  @llvm.memset...(i8*, i8, i[32|64], i32, i1)
    //    -> @llvm.memset...(i8*, i8, i[32|64], i1)
    // Note: i8*'s in the above can be any pointer type.
    if (CI->getNumArgOperands() != 5) {
      DefaultCase();
      return;
    }
    // Remove alignment argument (3), and add alignment attributes to the
    // dest/src pointers.
    Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
                      CI->getArgOperand(2), CI->getArgOperand(4)};
    NewCall = Builder.CreateCall(NewFn, Args);
    auto *MemCI = cast<MemIntrinsic>(NewCall);
    // All mem intrinsics support dest alignment.
    const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
    MemCI->setDestAlignment(Align->getZExtValue());
    // Memcpy/Memmove also support source alignment.
    if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
      MTI->setSourceAlignment(Align->getZExtValue());
    break;
  }
  }
  assert(NewCall && "Should have either set this variable or returned through "
                    "the default case");
  std::string Name = CI->getName();
  if (!Name.empty()) {
    CI->setName(Name + ".old");
    NewCall->setName(Name);
  }
  CI->replaceAllUsesWith(NewCall);
  CI->eraseFromParent();
}

void llvm::UpgradeCallsToIntrinsic(Function *F) {
  assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");

  // Check if this function should be upgraded and get the replacement function
  // if there is one.
  Function *NewFn;
  if (UpgradeIntrinsicFunction(F, NewFn)) {
    // Replace all users of the old function with the new function or new
    // instructions. This is not a range loop because the call is deleted.
    for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; )
      if (CallInst *CI = dyn_cast<CallInst>(*UI++))
        UpgradeIntrinsicCall(CI, NewFn);

    // Remove the old function, which is no longer used, from the module.
    F->eraseFromParent();
  }
}

MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
  // Check if the tag uses struct-path aware TBAA format.
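  // Roughly, an old scalar-format tag !{!"name", !parent} (optionally with a
  // trailing constant flag) becomes the struct-path form !{!type, !type, i64 0}
  // (or !{!type, !type, i64 0, <flag>}), as built below. The exact node shapes
  // depend on the input metadata.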
  if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
    return &MD;

  auto &Context = MD.getContext();
  if (MD.getNumOperands() == 3) {
    Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
    MDNode *ScalarType = MDNode::get(Context, Elts);
    // Create a MDNode <ScalarType, ScalarType, offset 0, const>
    Metadata *Elts2[] = {ScalarType, ScalarType,
                         ConstantAsMetadata::get(
                             Constant::getNullValue(Type::getInt64Ty(Context))),
                         MD.getOperand(2)};
    return MDNode::get(Context, Elts2);
  }
  // Create a MDNode <MD, MD, offset 0>
  Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
                                    Type::getInt64Ty(Context)))};
  return MDNode::get(Context, Elts);
}

Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
                                      Instruction *&Temp) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Temp = nullptr;
  Type *SrcTy = V->getType();
  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
    LLVMContext &Context = V->getContext();

    // We have no information about the target data layout, so we assume that
    // the maximum pointer size is 64 bits.
    Type *MidTy = Type::getInt64Ty(Context);
    Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);

    return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
  }

  return nullptr;
}

Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Type *SrcTy = C->getType();
  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
    LLVMContext &Context = C->getContext();

    // We have no information about the target data layout, so we assume that
    // the maximum pointer size is 64 bits.
    Type *MidTy = Type::getInt64Ty(Context);

    return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
                                     DestTy);
  }

  return nullptr;
}

/// Check the debug info version number; if it is outdated, drop the debug
/// info. Return true if the module is modified.
bool llvm::UpgradeDebugInfo(Module &M) {
  unsigned Version = getDebugMetadataVersionFromModule(M);
  if (Version == DEBUG_METADATA_VERSION) {
    bool BrokenDebugInfo = false;
    if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
      report_fatal_error("Broken module found, compilation aborted!");
    if (!BrokenDebugInfo)
      // Everything is ok.
      return false;
    else {
      // Diagnose malformed debug info.
      DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
      M.getContext().diagnose(Diag);
    }
  }
  bool Modified = StripDebugInfo(M);
  if (Modified && Version != DEBUG_METADATA_VERSION) {
    // Diagnose a version mismatch.
    DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
    M.getContext().diagnose(DiagVersion);
  }
  return Modified;
}

bool llvm::UpgradeRetainReleaseMarker(Module &M) {
  bool Changed = false;
  NamedMDNode *ModRetainReleaseMarker =
      M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker");
  if (ModRetainReleaseMarker) {
    MDNode *Op = ModRetainReleaseMarker->getOperand(0);
    if (Op) {
      MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
      if (ID) {
        SmallVector<StringRef, 4> ValueComp;
        ID->getString().split(ValueComp, "#");
        if (ValueComp.size() == 2) {
          std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
          Metadata *Ops[1] = {MDString::get(M.getContext(), NewValue)};
          ModRetainReleaseMarker->setOperand(0,
                                             MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }
  return Changed;
}

bool llvm::UpgradeModuleFlags(Module &M) {
  NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
  if (!ModFlags)
    return false;

  bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
  for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
    MDNode *Op = ModFlags->getOperand(I);
    if (Op->getNumOperands() != 3)
      continue;
    MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
    if (!ID)
      continue;
    if (ID->getString() == "Objective-C Image Info Version")
      HasObjCFlag = true;
    if (ID->getString() == "Objective-C Class Properties")
      HasClassProperties = true;
    // Upgrade PIC/PIE Module Flags. The behavior for these two flags used to
    // be Error and is now Max.
    if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
      if (auto *Behavior =
              mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
        if (Behavior->getLimitedValue() == Module::Error) {
          Type *Int32Ty = Type::getInt32Ty(M.getContext());
          Metadata *Ops[3] = {
              ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
              MDString::get(M.getContext(), ID->getString()),
              Op->getOperand(2)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
    // Upgrade Objective-C Image Info Section. Remove the whitespace in the
    // section name so that llvm-lto will not complain about mismatching
    // module flags that are functionally the same.
    if (ID->getString() == "Objective-C Image Info Section") {
      if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
        SmallVector<StringRef, 4> ValueComp;
        Value->getString().split(ValueComp, " ");
        if (ValueComp.size() != 1) {
          std::string NewValue;
          for (auto &S : ValueComp)
            NewValue += S.str();
          Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
                              MDString::get(M.getContext(), NewValue)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }

  // "Objective-C Class Properties" is recently added for Objective-C. We
  // upgrade ObjC bitcode to contain an "Objective-C Class Properties" module
  // flag of value 0, so that we can correctly downgrade this flag when trying
  // to link an ObjC bitcode without this module flag with an ObjC bitcode that
  // has this module flag.
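  // For example (illustrative only), the flag added below corresponds to a
  // module-flags entry along the lines of:
  //   !{i32 4, !"Objective-C Class Properties", i32 0}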
  if (HasObjCFlag && !HasClassProperties) {
    M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
                    (uint32_t)0);
    Changed = true;
  }

  return Changed;
}

void llvm::UpgradeSectionAttributes(Module &M) {
  auto TrimSpaces = [](StringRef Section) -> std::string {
    SmallVector<StringRef, 5> Components;
    Section.split(Components, ',');

    SmallString<32> Buffer;
    raw_svector_ostream OS(Buffer);

    for (auto Component : Components)
      OS << ',' << Component.trim();

    return OS.str().substr(1);
  };

  for (auto &GV : M.globals()) {
    if (!GV.hasSection())
      continue;

    StringRef Section = GV.getSection();

    if (!Section.startswith("__DATA, __objc_catlist"))
      continue;

    // __DATA, __objc_catlist, regular, no_dead_strip
    // __DATA,__objc_catlist,regular,no_dead_strip
    GV.setSection(TrimSpaces(Section));
  }
}

static bool isOldLoopArgument(Metadata *MD) {
  auto *T = dyn_cast_or_null<MDTuple>(MD);
  if (!T)
    return false;
  if (T->getNumOperands() < 1)
    return false;
  auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
  if (!S)
    return false;
  return S->getString().startswith("llvm.vectorizer.");
}

static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
  StringRef OldPrefix = "llvm.vectorizer.";
  assert(OldTag.startswith(OldPrefix) && "Expected old prefix");

  if (OldTag == "llvm.vectorizer.unroll")
    return MDString::get(C, "llvm.loop.interleave.count");

  return MDString::get(
      C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
             .str());
}

static Metadata *upgradeLoopArgument(Metadata *MD) {
  auto *T = dyn_cast_or_null<MDTuple>(MD);
  if (!T)
    return MD;
  if (T->getNumOperands() < 1)
    return MD;
  auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
  if (!OldTag)
    return MD;
  if (!OldTag->getString().startswith("llvm.vectorizer."))
    return MD;

  // This has an old tag. Upgrade it.
  SmallVector<Metadata *, 8> Ops;
  Ops.reserve(T->getNumOperands());
  Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
  for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
    Ops.push_back(T->getOperand(I));

  return MDTuple::get(T->getContext(), Ops);
}

MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
  auto *T = dyn_cast<MDTuple>(&N);
  if (!T)
    return &N;

  if (none_of(T->operands(), isOldLoopArgument))
    return &N;

  SmallVector<Metadata *, 8> Ops;
  Ops.reserve(T->getNumOperands());
  for (Metadata *MD : T->operands())
    Ops.push_back(upgradeLoopArgument(MD));

  return MDTuple::get(T->getContext(), Ops);
}