//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>
using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old; replace it with the new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsic matches below should be marked with which llvm
  // version started autoupgrading them. At some point in the future we would
  // like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
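  // Note: the "x86." prefix has already been stripped by the caller, so the
  // names below are matched without it (e.g. "sse2.padds.b" here corresponds
  // to an intrinsic originally declared as llvm.x86.sse2.padds.b).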
  if (Name == "addcarryx.u32" || // Added in 8.0
      Name == "addcarryx.u64" || // Added in 8.0
      Name == "addcarry.u32" || // Added in 8.0
      Name == "addcarry.u64" || // Added in 8.0
      Name == "subborrow.u32" || // Added in 8.0
      Name == "subborrow.u64" || // Added in 8.0
      Name.startswith("sse2.padds.") || // Added in 8.0
      Name.startswith("sse2.psubs.") || // Added in 8.0
      Name.startswith("sse2.paddus.") || // Added in 8.0
      Name.startswith("sse2.psubus.") || // Added in 8.0
      Name.startswith("avx2.padds.") || // Added in 8.0
      Name.startswith("avx2.psubs.") || // Added in 8.0
      Name.startswith("avx2.paddus.") || // Added in 8.0
      Name.startswith("avx2.psubus.") || // Added in 8.0
      Name.startswith("avx512.padds.") || // Added in 8.0
      Name.startswith("avx512.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.padds.") || // Added in 8.0
      Name.startswith("avx512.mask.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.paddus.") || // Added in 8.0
      Name.startswith("avx512.mask.psubus.") || // Added in 8.0
      Name == "ssse3.pabs.b.128" || // Added in 6.0
      Name == "ssse3.pabs.w.128" || // Added in 6.0
      Name == "ssse3.pabs.d.128" || // Added in 6.0
      Name.startswith("fma4.vfmadd.s") || // Added in 7.0
      Name.startswith("fma.vfmadd.") || // Added in 7.0
      Name.startswith("fma.vfmsub.") || // Added in 7.0
      Name.startswith("fma.vfmaddsub.") || // Added in 7.0
      Name.startswith("fma.vfmsubadd.") || // Added in 7.0
      Name.startswith("fma.vfnmadd.") || // Added in 7.0
      Name.startswith("fma.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
      Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
      Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
      Name.startswith("avx512.kunpck") || // Added in 6.0
      Name.startswith("avx2.pabs.") || // Added in 6.0
      Name.startswith("avx512.mask.pabs.") || // Added in 6.0
      Name.startswith("avx512.broadcastm") || // Added in 6.0
      Name == "sse.sqrt.ss" || // Added in 7.0
      Name == "sse2.sqrt.sd" || // Added in 7.0
      Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
      Name.startswith("avx.sqrt.p") || // Added in 7.0
      Name.startswith("sse2.sqrt.p") || // Added in 7.0
      Name.startswith("sse.sqrt.p") || // Added in 7.0
      Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
      Name.startswith("sse2.pcmpeq.") || // Added in 3.1
      Name.startswith("sse2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx2.pcmpeq.") || // Added in 3.1
      Name.startswith("avx2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
      Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
      Name.startswith("avx.vperm2f128.") || // Added in 6.0
      Name == "avx2.vperm2i128" || // Added in 6.0
      Name == "sse.add.ss" || // Added in 4.0
      Name == "sse2.add.sd" || // Added in 4.0
      Name == "sse.sub.ss" || // Added in 4.0
      Name == "sse2.sub.sd" || // Added in 4.0
      Name == "sse.mul.ss" || // Added in 4.0
      Name == "sse2.mul.sd" || // Added in 4.0
      Name == "sse.div.ss" || // Added in 4.0
      Name == "sse2.div.sd" || // Added in 4.0
      Name == "sse41.pmaxsb" || // Added in 3.9
      Name == "sse2.pmaxs.w" || // Added in 3.9
      Name == "sse41.pmaxsd" || // Added in 3.9
      Name == "sse2.pmaxu.b" || // Added in 3.9
      Name == "sse41.pmaxuw" || // Added in 3.9
      Name == "sse41.pmaxud" || // Added in 3.9
      Name == "sse41.pminsb" || // Added in 3.9
      Name == "sse2.pmins.w" || // Added in 3.9
      Name == "sse41.pminsd" || // Added in 3.9
      Name == "sse2.pminu.b" || // Added in 3.9
      Name == "sse41.pminuw" || // Added in 3.9
      Name == "sse41.pminud" || // Added in 3.9
      Name == "avx512.kand.w" || // Added in 7.0
      Name == "avx512.kandn.w" || // Added in 7.0
      Name == "avx512.knot.w" || // Added in 7.0
      Name == "avx512.kor.w" || // Added in 7.0
      Name == "avx512.kxor.w" || // Added in 7.0
      Name == "avx512.kxnor.w" || // Added in 7.0
      Name == "avx512.kortestc.w" || // Added in 7.0
      Name == "avx512.kortestz.w" || // Added in 7.0
      Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
      Name.startswith("avx2.pmax") || // Added in 3.9
      Name.startswith("avx2.pmin") || // Added in 3.9
      Name.startswith("avx512.mask.pmax") || // Added in 4.0
      Name.startswith("avx512.mask.pmin") || // Added in 4.0
      Name.startswith("avx2.vbroadcast") || // Added in 3.8
      Name.startswith("avx2.pbroadcast") || // Added in 3.8
      Name.startswith("avx.vpermil.") || // Added in 3.1
      Name.startswith("sse2.pshuf") || // Added in 3.9
      Name.startswith("avx512.pbroadcast") || // Added in 3.9
      Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
      Name.startswith("avx512.mask.movddup") || // Added in 3.9
      Name.startswith("avx512.mask.movshdup") || // Added in 3.9
      Name.startswith("avx512.mask.movsldup") || // Added in 3.9
      Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
      Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
      Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
      Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
      Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
      Name.startswith("avx512.mask.punpckl") || // Added in 3.9
      Name.startswith("avx512.mask.punpckh") || // Added in 3.9
      Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
      Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
      Name.startswith("avx512.mask.pand.") || // Added in 3.9
      Name.startswith("avx512.mask.pandn.") || // Added in 3.9
      Name.startswith("avx512.mask.por.") || // Added in 3.9
      Name.startswith("avx512.mask.pxor.") || // Added in 3.9
      Name.startswith("avx512.mask.and.") || // Added in 3.9
      Name.startswith("avx512.mask.andn.") || // Added in 3.9
      Name.startswith("avx512.mask.or.") || // Added in 3.9
      Name.startswith("avx512.mask.xor.") || // Added in 3.9
      Name.startswith("avx512.mask.padd.") || // Added in 4.0
      Name.startswith("avx512.mask.psub.") || // Added in 4.0
      Name.startswith("avx512.mask.pmull.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtudq2ps.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0 updated 9.0
      Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
      Name == "avx512.cvtusi2sd" || // Added in 7.0
      Name.startswith("avx512.mask.permvar.") || // Added in 7.0
      Name == "sse2.pmulu.dq" || // Added in 7.0
      Name == "sse41.pmuldq" || // Added in 7.0
      Name == "avx2.pmulu.dq" || // Added in 7.0
      Name == "avx2.pmul.dq" || // Added in 7.0
      Name == "avx512.pmulu.dq.512" || // Added in 7.0
      Name == "avx512.pmul.dq.512" || // Added in 7.0
      Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
      Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
      Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.p") || // Added in 7.0
      Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
      Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
      Name.startswith("avx512.mask.psll.d") || // Added in 4.0
      Name.startswith("avx512.mask.psll.q") || // Added in 4.0
      Name.startswith("avx512.mask.psll.w") || // Added in 4.0
      Name.startswith("avx512.mask.psra.d") || // Added in 4.0
      Name.startswith("avx512.mask.psra.q") || // Added in 4.0
      Name.startswith("avx512.mask.psra.w") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
      Name.startswith("avx512.mask.pslli") || // Added in 4.0
      Name.startswith("avx512.mask.psrai") || // Added in 4.0
      Name.startswith("avx512.mask.psrli") || // Added in 4.0
      Name.startswith("avx512.mask.psllv") || // Added in 4.0
Name.startswith("avx512.mask.psrav") || // Added in 4.0 261 Name.startswith("avx512.mask.psrlv") || // Added in 4.0 262 Name.startswith("sse41.pmovsx") || // Added in 3.8 263 Name.startswith("sse41.pmovzx") || // Added in 3.9 264 Name.startswith("avx2.pmovsx") || // Added in 3.9 265 Name.startswith("avx2.pmovzx") || // Added in 3.9 266 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0 267 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0 268 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0 269 Name.startswith("avx512.mask.pternlog.") || // Added in 7.0 270 Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0 271 Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0 272 Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0 273 Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0 274 Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0 275 Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0 276 Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0 277 Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0 278 Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0 279 Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0 280 Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0 281 Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0 282 Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0 283 Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0 284 Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0 285 Name.startswith("avx512.mask.vpshld.") || // Added in 7.0 286 Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0 287 Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0 288 Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0 289 Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0 290 Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0 291 Name.startswith("avx512.vpshld.") || // Added in 8.0 292 Name.startswith("avx512.vpshrd.") || // Added in 8.0 293 Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0 294 Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0 295 Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0 296 Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0 297 Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0 298 Name.startswith("avx512.mask.min.p") || // Added in 7.0. 
      Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
      Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
      Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
      Name.startswith("avx512.mask.conflict.") || // Added in 9.0
      Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
      Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
      Name == "sse.cvtsi2ss" || // Added in 7.0
      Name == "sse.cvtsi642ss" || // Added in 7.0
      Name == "sse2.cvtsi2sd" || // Added in 7.0
      Name == "sse2.cvtsi642sd" || // Added in 7.0
      Name == "sse2.cvtss2sd" || // Added in 7.0
      Name == "sse2.cvtdq2pd" || // Added in 3.9
      Name == "sse2.cvtdq2ps" || // Added in 7.0
      Name == "sse2.cvtps2pd" || // Added in 3.9
      Name == "avx.cvtdq2.pd.256" || // Added in 3.9
      Name == "avx.cvtdq2.ps.256" || // Added in 7.0
      Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
      Name.startswith("avx.vinsertf128.") || // Added in 3.7
      Name == "avx2.vinserti128" || // Added in 3.7
      Name.startswith("avx512.mask.insert") || // Added in 4.0
      Name.startswith("avx.vextractf128.") || // Added in 3.7
      Name == "avx2.vextracti128" || // Added in 3.7
      Name.startswith("avx512.mask.vextract") || // Added in 4.0
      Name.startswith("sse4a.movnt.") || // Added in 3.9
      Name.startswith("avx.movnt.") || // Added in 3.2
      Name.startswith("avx512.storent.") || // Added in 3.9
      Name == "sse41.movntdqa" || // Added in 5.0
      Name == "avx2.movntdqa" || // Added in 5.0
      Name == "avx512.movntdqa" || // Added in 5.0
      Name == "sse2.storel.dq" || // Added in 3.9
      Name.startswith("sse.storeu.") || // Added in 3.9
      Name.startswith("sse2.storeu.") || // Added in 3.9
      Name.startswith("avx.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.store.p") || // Added in 3.9
      Name.startswith("avx512.mask.store.b.") || // Added in 3.9
      Name.startswith("avx512.mask.store.w.") || // Added in 3.9
      Name.startswith("avx512.mask.store.d.") || // Added in 3.9
      Name.startswith("avx512.mask.store.q.") || // Added in 3.9
      Name == "avx512.mask.store.ss" || // Added in 7.0
      Name.startswith("avx512.mask.loadu.") || // Added in 3.9
      Name.startswith("avx512.mask.load.") || // Added in 3.9
      Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
      Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
      Name.startswith("avx512.mask.expand.b") || // Added in 9.0
      Name.startswith("avx512.mask.expand.w") || // Added in 9.0
      Name.startswith("avx512.mask.expand.d") || // Added in 9.0
      Name.startswith("avx512.mask.expand.q") || // Added in 9.0
      Name.startswith("avx512.mask.expand.p") || // Added in 9.0
      Name.startswith("avx512.mask.compress.b") || // Added in 9.0
      Name.startswith("avx512.mask.compress.w") || // Added in 9.0
      Name.startswith("avx512.mask.compress.d") || // Added in 9.0
      Name.startswith("avx512.mask.compress.q") || // Added in 9.0
      Name.startswith("avx512.mask.compress.p") || // Added in 9.0
      Name == "sse42.crc32.64.8" || // Added in 3.4
      Name.startswith("avx.vbroadcast.s") || // Added in 3.5
      Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
      Name.startswith("avx512.mask.palignr.") || // Added in 3.9
      Name.startswith("avx512.mask.valign.") || // Added in 4.0
Name.startswith("sse2.psll.dq") || // Added in 3.7 361 Name.startswith("sse2.psrl.dq") || // Added in 3.7 362 Name.startswith("avx2.psll.dq") || // Added in 3.7 363 Name.startswith("avx2.psrl.dq") || // Added in 3.7 364 Name.startswith("avx512.psll.dq") || // Added in 3.9 365 Name.startswith("avx512.psrl.dq") || // Added in 3.9 366 Name == "sse41.pblendw" || // Added in 3.7 367 Name.startswith("sse41.blendp") || // Added in 3.7 368 Name.startswith("avx.blend.p") || // Added in 3.7 369 Name == "avx2.pblendw" || // Added in 3.7 370 Name.startswith("avx2.pblendd.") || // Added in 3.7 371 Name.startswith("avx.vbroadcastf128") || // Added in 4.0 372 Name == "avx2.vbroadcasti128" || // Added in 3.7 373 Name.startswith("avx512.mask.broadcastf") || // Added in 6.0 374 Name.startswith("avx512.mask.broadcasti") || // Added in 6.0 375 Name == "xop.vpcmov" || // Added in 3.8 376 Name == "xop.vpcmov.256" || // Added in 5.0 377 Name.startswith("avx512.mask.move.s") || // Added in 4.0 378 Name.startswith("avx512.cvtmask2") || // Added in 5.0 379 Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0 380 Name.startswith("xop.vprot") || // Added in 8.0 381 Name.startswith("avx512.prol") || // Added in 8.0 382 Name.startswith("avx512.pror") || // Added in 8.0 383 Name.startswith("avx512.mask.prorv.") || // Added in 8.0 384 Name.startswith("avx512.mask.pror.") || // Added in 8.0 385 Name.startswith("avx512.mask.prolv.") || // Added in 8.0 386 Name.startswith("avx512.mask.prol.") || // Added in 8.0 387 Name.startswith("avx512.ptestm") || //Added in 6.0 388 Name.startswith("avx512.ptestnm") || //Added in 6.0 389 Name.startswith("sse2.pavg") || // Added in 6.0 390 Name.startswith("avx2.pavg") || // Added in 6.0 391 Name.startswith("avx512.mask.pavg")) // Added in 6.0 392 return true; 393 394 return false; 395 } 396 397 static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name, 398 Function *&NewFn) { 399 // Only handle intrinsics that start with "x86.". 400 if (!Name.startswith("x86.")) 401 return false; 402 // Remove "x86." prefix. 403 Name = Name.substr(4); 404 405 if (ShouldUpgradeX86Intrinsic(F, Name)) { 406 NewFn = nullptr; 407 return true; 408 } 409 410 if (Name == "rdtscp") { // Added in 8.0 411 // If this intrinsic has 0 operands, it's the new version. 412 if (F->getFunctionType()->getNumParams() == 0) 413 return false; 414 415 rename(F); 416 NewFn = Intrinsic::getDeclaration(F->getParent(), 417 Intrinsic::x86_rdtscp); 418 return true; 419 } 420 421 // SSE4.1 ptest functions may have an old signature. 422 if (Name.startswith("sse41.ptest")) { // Added in 3.2 423 if (Name.substr(11) == "c") 424 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn); 425 if (Name.substr(11) == "z") 426 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn); 427 if (Name.substr(11) == "nzc") 428 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn); 429 } 430 // Several blend and other instructions with masks used the wrong number of 431 // bits. 
  if (Name == "sse41.insertps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
                                            NewFn);
  if (Name == "sse41.dppd") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
                                            NewFn);
  if (Name == "sse41.dpps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
                                            NewFn);
  if (Name == "sse41.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
                                            NewFn);
  if (Name == "avx.dp.ps.256") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
                                            NewFn);
  if (Name == "avx2.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
                                            NewFn);

  // frcz.ss/sd may need to have an argument dropped. Added in 3.2
  if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_ss);
    return true;
  }
  if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_sd);
    return true;
  }
  // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
  if (Name.startswith("xop.vpermil2")) { // Added in 3.9
    auto Idx = F->getFunctionType()->getParamType(2);
    if (Idx->isFPOrFPVectorTy()) {
      rename(F);
      unsigned IdxSize = Idx->getPrimitiveSizeInBits();
      unsigned EltSize = Idx->getScalarSizeInBits();
      Intrinsic::ID Permil2ID;
      if (EltSize == 64 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd;
      else if (EltSize == 32 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2ps;
      else if (EltSize == 64 && IdxSize == 256)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
      else
        Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
      NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
      return true;
    }
  }

  if (Name == "seh.recoverfp") {
    NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
    return true;
  }

  return false;
}

static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vclz")) {
      Type* args[2] = {
        F->arg_begin()->getType(),
        Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm." + Name + ".p0i8", F->getParent());
      return true;
    }
    Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vstRegex.match(Name)) {
      static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
                                                Intrinsic::arm_neon_vst2,
                                                Intrinsic::arm_neon_vst3,
                                                Intrinsic::arm_neon_vst4};

      static const Intrinsic::ID StoreLaneInts[] = {
        Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
        Intrinsic::arm_neon_vst4lane
      };

      auto fArgs = F->getFunctionType()->params();
      Type *Tys[] = {fArgs[0], fArgs[1]};
      if (Name.find("lane") == StringRef::npos)
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreInts[fArgs.size() - 3], Tys);
      else
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreLaneInts[fArgs.size() - 5], Tys);
      return true;
    }
    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::thread_pointer);
      return true;
    }
    break;
  }

  case 'c': {
    if (Name.startswith("ctlz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("cttz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
                                        F->arg_begin()->getType());
      return true;
    }
    break;
  }
  case 'd': {
    if (Name == "dbg.value" && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
      return true;
    }
    break;
  }
  case 'i':
  case 'l': {
    bool IsLifetimeStart = Name.startswith("lifetime.start");
    if (IsLifetimeStart || Name.startswith("invariant.start")) {
      Intrinsic::ID ID = IsLifetimeStart ?
          Intrinsic::lifetime_start : Intrinsic::invariant_start;
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[1]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }

    bool IsLifetimeEnd = Name.startswith("lifetime.end");
    if (IsLifetimeEnd || Name.startswith("invariant.end")) {
      Intrinsic::ID ID = IsLifetimeEnd ?
          Intrinsic::lifetime_end : Intrinsic::invariant_end;

      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }
    if (Name.startswith("invariant.group.barrier")) {
      // Rename invariant.group.barrier to launder.invariant.group
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[0]};
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
          Intrinsic::launder_invariant_group, ObjectPtr);
      return true;
    }

    break;
  }
  case 'm': {
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    // Renaming gather/scatter intrinsics with no address space overloading
    // to the new overload which includes an address space.
    if (Name.startswith("masked.gather.")) {
      Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_gather, Tys);
        return true;
      }
    }
    if (Name.startswith("masked.scatter.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = {Args[0], Args[1]};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_scatter, Tys);
        return true;
      }
    }
    // Update the memory intrinsics (memcpy/memmove/memset) that have an
    // alignment parameter to embed the alignment as an attribute of
    // the pointer args.
    if (Name.startswith("memcpy.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memmove.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest and len.
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
  case 'n': {
    if (Name.startswith("nvvm.")) {
      Name = Name.substr(5);

      // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
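      // For example, llvm.nvvm.brev32 and llvm.nvvm.brev64 become
      // llvm.bitreverse on the matching integer type.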
      Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
                              .Cases("brev32", "brev64", Intrinsic::bitreverse)
                              .Case("clz.i", Intrinsic::ctlz)
                              .Case("popc.i", Intrinsic::ctpop)
                              .Default(Intrinsic::not_intrinsic);
      if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                          {F->getReturnType()});
        return true;
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
      // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = StringSwitch<bool>(Name)
                        .Cases("abs.i", "abs.ll", true)
                        .Cases("clz.ll", "popc.ll", "h2f", true)
                        .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
                        .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
                        .Default(false);
      if (Expand) {
        NewFn = nullptr;
        return true;
      }
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->arg_size() == 2 || F->arg_size() == 3 ||
          F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
                                          Tys);
        return true;
      }
    }
    break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x':
    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }
  // Remangle our intrinsic since we upgrade the mangling
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != None) {
    NewFn = Result.getValue();
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  // Nothing to do yet.
  return false;
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
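    // Within each 16-byte lane, byte i of the result is input byte (i - Shift)
    // when i >= Shift and zero otherwise: indices below NumElts select from
    // the zero vector (the first shuffle operand), indices at or above NumElts
    // select from Op.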
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(
      Builder.getInt1Ty(),
      cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}

static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
                                  Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  llvm::VectorType *MaskTy =
      llvm::VectorType::get(Builder.getInt1Ty(),
                            Mask->getType()->getIntegerBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);
  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the
// immediate, so we need to handle both cases. VALIGN also doesn't have
// 128-bit lanes.
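// For example, a 128-bit PALIGNR with an immediate of 20 is handled below by
// converting it to a shift of 4 that pulls in zeroes, while VALIGN on a
// 16-element vector masks the same immediate down to 4.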
static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that.
  for (unsigned l = 0; l < NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}

static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallInst &CI,
                                          bool ZeroMask, bool IndexForm) {
  Type *Ty = CI.getType();
  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
  unsigned EltWidth = Ty->getScalarSizeInBits();
  bool IsFloat = Ty->isFPOrFPVectorTy();
  Intrinsic::ID IID;
  if (VecWidth == 128 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
  else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_128;
  else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
  else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_128;
  else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
  else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_256;
  else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
  else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_256;
  else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
  else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_512;
  else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
  else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_512;
  else if (VecWidth == 128 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
  else if (VecWidth == 256 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
  else if (VecWidth == 512 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
  else if (VecWidth == 128 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
  else if (VecWidth == 256 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
  else if (VecWidth == 512 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
  else
    llvm_unreachable("Unexpected intrinsic");

  Value *Args[] = { CI.getArgOperand(0), CI.getArgOperand(1),
                    CI.getArgOperand(2) };

  // If this isn't index form we need to swap operand 0 and 1.
  if (!IndexForm)
    std::swap(Args[0], Args[1]);

  Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
                                Args);
  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
                             : Builder.CreateBitCast(CI.getArgOperand(1),
                                                     Ty);
  return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
}

static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
                                            bool IsSigned, bool IsAddition) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getOperand(0);
  Value *Op1 = CI.getOperand(1);

  Intrinsic::ID IID =
      IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
               : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
                               bool IsRotateRight) {
  Type *Ty = CI.getType();
  Value *Src = CI.getArgOperand(0);
  Value *Amt = CI.getArgOperand(1);

  // Amount may be scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallInst &CI, unsigned Imm,
                              bool IsSigned) {
  Type *Ty = CI.getType();
  Value *LHS = CI.getArgOperand(0);
  Value *RHS = CI.getArgOperand(1);

  CmpInst::Predicate Pred;
  switch (Imm) {
  case 0x0:
    Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    break;
  case 0x1:
    Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
    break;
  case 0x2:
    Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    break;
  case 0x3:
    Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
    break;
  case 0x4:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case 0x5:
    Pred = ICmpInst::ICMP_NE;
    break;
  case 0x6:
    return Constant::getNullValue(Ty); // FALSE
  case 0x7:
    return Constant::getAllOnesValue(Ty); // TRUE
  default:
    llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
  }

  Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
  Value *Ext = Builder.CreateSExt(Cmp, Ty);
  return Ext;
}

static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
                                    bool IsShiftRight, bool ZeroMask) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Amt = CI.getArgOperand(2);

  if (IsShiftRight)
    std::swap(Op0, Op1);

  // Amount may be scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});

  unsigned NumArgs = CI.getNumArgOperands();
  if (NumArgs >= 4) { // For masked intrinsics.
    Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
                    ZeroMask     ? ConstantAggregateZero::get(CI.getType()) :
                                   CI.getArgOperand(0);
    Value *Mask = CI.getOperand(NumArgs - 1);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  unsigned Align =
      Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  Type *ValTy = Passthru->getType();
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
  unsigned Align =
      Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(ValTy, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
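  // For example, an i8 mask used with a <4 x i32> load is bitcast to <8 x i1>
  // and the low 4 elements are extracted by getX86MaskVec.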
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
}

static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  llvm::Type *Ty = Op0->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
  Value *Neg = Builder.CreateNeg(Op0);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);

  if (CI.getNumArgOperands() == 3)
    Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));

  return Res;
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
  Type *Ty = CI.getType();

  // Arguments have a vXi32 type so cast to vXi64.
  Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
  Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);

  if (IsSigned) {
    // Sign extend the low 32 bits by shifting left, then arithmetic shifting
    // right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = Builder.CreateShl(LHS, ShiftAmt);
    LHS = Builder.CreateAShr(LHS, ShiftAmt);
    RHS = Builder.CreateShl(RHS, ShiftAmt);
    RHS = Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = Builder.CreateAnd(LHS, Mask);
    RHS = Builder.CreateAnd(RHS, Mask);
  }

  Value *Res = Builder.CreateMul(LHS, RHS);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

// Apply a mask to a vector of i1s and make sure the result is at least 8 bits
// wide.
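// For example, a <4 x i1> compare result is padded out to <8 x i1> with zero
// elements before being bitcast, so the result is always at least an i8.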
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask) {
  unsigned NumElts = Vec->getType()->getVectorNumElements();
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Vec = Builder.CreateShuffleVector(Vec,
                                      Constant::getNullValue(Vec->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
}

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   unsigned CC, bool Signed) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ;  break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE;  break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
  }

  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);

  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
                                    Intrinsic::ID IID) {
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
  Value *Rep = Builder.CreateCall(Intrin,
                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}

static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value* A = CI.getArgOperand(0);
  Value* B = CI.getArgOperand(1);
  Value* Src = CI.getArgOperand(2);
  Value* Mask = CI.getArgOperand(3);

  Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
  Value* Cmp = Builder.CreateIsNotNull(AndNode);
  Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
  Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
  Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}


static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
  Value* Op = CI.getArgOperand(0);
  Type* ReturnOp = CI.getType();
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
}

// Replace intrinsic with unmasked version and a select.
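// For example, a call to the old llvm.x86.avx512.mask.max.ps.128 is rewritten
// as llvm.x86.sse.max.ps on the first two operands, followed by a select
// between that result and the passthru operand, driven by the mask operand.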
static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
                                      CallInst &CI, Value *&Rep) {
  Name = Name.substr(12); // Remove avx512.mask.

  unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
  unsigned EltWidth = CI.getType()->getScalarSizeInBits();
  Intrinsic::ID IID;
  if (Name.startswith("max.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_max_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_max_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_max_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_max_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("min.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_min_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_min_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_min_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_min_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pshuf.b.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pshuf_b_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pshuf_b;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pshuf_b_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmul.hr.sw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmul_hr_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulh.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulh_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulh_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulh_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulhu.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulhu_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulhu_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulhu_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddw.d.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmadd_wd;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_wd;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddw_d_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddubs.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_ub_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddubs_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packsswb.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_packsswb_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packsswb;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packsswb_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packssdw.")) {
1417 if (VecWidth == 128) 1418 IID = Intrinsic::x86_sse2_packssdw_128; 1419 else if (VecWidth == 256) 1420 IID = Intrinsic::x86_avx2_packssdw; 1421 else if (VecWidth == 512) 1422 IID = Intrinsic::x86_avx512_packssdw_512; 1423 else 1424 llvm_unreachable("Unexpected intrinsic"); 1425 } else if (Name.startswith("packuswb.")) { 1426 if (VecWidth == 128) 1427 IID = Intrinsic::x86_sse2_packuswb_128; 1428 else if (VecWidth == 256) 1429 IID = Intrinsic::x86_avx2_packuswb; 1430 else if (VecWidth == 512) 1431 IID = Intrinsic::x86_avx512_packuswb_512; 1432 else 1433 llvm_unreachable("Unexpected intrinsic"); 1434 } else if (Name.startswith("packusdw.")) { 1435 if (VecWidth == 128) 1436 IID = Intrinsic::x86_sse41_packusdw; 1437 else if (VecWidth == 256) 1438 IID = Intrinsic::x86_avx2_packusdw; 1439 else if (VecWidth == 512) 1440 IID = Intrinsic::x86_avx512_packusdw_512; 1441 else 1442 llvm_unreachable("Unexpected intrinsic"); 1443 } else if (Name.startswith("vpermilvar.")) { 1444 if (VecWidth == 128 && EltWidth == 32) 1445 IID = Intrinsic::x86_avx_vpermilvar_ps; 1446 else if (VecWidth == 128 && EltWidth == 64) 1447 IID = Intrinsic::x86_avx_vpermilvar_pd; 1448 else if (VecWidth == 256 && EltWidth == 32) 1449 IID = Intrinsic::x86_avx_vpermilvar_ps_256; 1450 else if (VecWidth == 256 && EltWidth == 64) 1451 IID = Intrinsic::x86_avx_vpermilvar_pd_256; 1452 else if (VecWidth == 512 && EltWidth == 32) 1453 IID = Intrinsic::x86_avx512_vpermilvar_ps_512; 1454 else if (VecWidth == 512 && EltWidth == 64) 1455 IID = Intrinsic::x86_avx512_vpermilvar_pd_512; 1456 else 1457 llvm_unreachable("Unexpected intrinsic"); 1458 } else if (Name == "cvtpd2dq.256") { 1459 IID = Intrinsic::x86_avx_cvt_pd2dq_256; 1460 } else if (Name == "cvtpd2ps.256") { 1461 IID = Intrinsic::x86_avx_cvt_pd2_ps_256; 1462 } else if (Name == "cvttpd2dq.256") { 1463 IID = Intrinsic::x86_avx_cvtt_pd2dq_256; 1464 } else if (Name == "cvttps2dq.128") { 1465 IID = Intrinsic::x86_sse2_cvttps2dq; 1466 } else if (Name == "cvttps2dq.256") { 1467 IID = Intrinsic::x86_avx_cvtt_ps2dq_256; 1468 } else if (Name.startswith("permvar.")) { 1469 bool IsFloat = CI.getType()->isFPOrFPVectorTy(); 1470 if (VecWidth == 256 && EltWidth == 32 && IsFloat) 1471 IID = Intrinsic::x86_avx2_permps; 1472 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat) 1473 IID = Intrinsic::x86_avx2_permd; 1474 else if (VecWidth == 256 && EltWidth == 64 && IsFloat) 1475 IID = Intrinsic::x86_avx512_permvar_df_256; 1476 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat) 1477 IID = Intrinsic::x86_avx512_permvar_di_256; 1478 else if (VecWidth == 512 && EltWidth == 32 && IsFloat) 1479 IID = Intrinsic::x86_avx512_permvar_sf_512; 1480 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat) 1481 IID = Intrinsic::x86_avx512_permvar_si_512; 1482 else if (VecWidth == 512 && EltWidth == 64 && IsFloat) 1483 IID = Intrinsic::x86_avx512_permvar_df_512; 1484 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat) 1485 IID = Intrinsic::x86_avx512_permvar_di_512; 1486 else if (VecWidth == 128 && EltWidth == 16) 1487 IID = Intrinsic::x86_avx512_permvar_hi_128; 1488 else if (VecWidth == 256 && EltWidth == 16) 1489 IID = Intrinsic::x86_avx512_permvar_hi_256; 1490 else if (VecWidth == 512 && EltWidth == 16) 1491 IID = Intrinsic::x86_avx512_permvar_hi_512; 1492 else if (VecWidth == 128 && EltWidth == 8) 1493 IID = Intrinsic::x86_avx512_permvar_qi_128; 1494 else if (VecWidth == 256 && EltWidth == 8) 1495 IID = Intrinsic::x86_avx512_permvar_qi_256; 1496 else if (VecWidth == 512 && EltWidth == 8) 1497 IID = 
Intrinsic::x86_avx512_permvar_qi_512; 1498 else 1499 llvm_unreachable("Unexpected intrinsic"); 1500 } else if (Name.startswith("dbpsadbw.")) { 1501 if (VecWidth == 128) 1502 IID = Intrinsic::x86_avx512_dbpsadbw_128; 1503 else if (VecWidth == 256) 1504 IID = Intrinsic::x86_avx512_dbpsadbw_256; 1505 else if (VecWidth == 512) 1506 IID = Intrinsic::x86_avx512_dbpsadbw_512; 1507 else 1508 llvm_unreachable("Unexpected intrinsic"); 1509 } else if (Name.startswith("pmultishift.qb.")) { 1510 if (VecWidth == 128) 1511 IID = Intrinsic::x86_avx512_pmultishift_qb_128; 1512 else if (VecWidth == 256) 1513 IID = Intrinsic::x86_avx512_pmultishift_qb_256; 1514 else if (VecWidth == 512) 1515 IID = Intrinsic::x86_avx512_pmultishift_qb_512; 1516 else 1517 llvm_unreachable("Unexpected intrinsic"); 1518 } else if (Name.startswith("conflict.")) { 1519 if (Name[9] == 'd' && VecWidth == 128) 1520 IID = Intrinsic::x86_avx512_conflict_d_128; 1521 else if (Name[9] == 'd' && VecWidth == 256) 1522 IID = Intrinsic::x86_avx512_conflict_d_256; 1523 else if (Name[9] == 'd' && VecWidth == 512) 1524 IID = Intrinsic::x86_avx512_conflict_d_512; 1525 else if (Name[9] == 'q' && VecWidth == 128) 1526 IID = Intrinsic::x86_avx512_conflict_q_128; 1527 else if (Name[9] == 'q' && VecWidth == 256) 1528 IID = Intrinsic::x86_avx512_conflict_q_256; 1529 else if (Name[9] == 'q' && VecWidth == 512) 1530 IID = Intrinsic::x86_avx512_conflict_q_512; 1531 else 1532 llvm_unreachable("Unexpected intrinsic"); 1533 } else 1534 return false; 1535 1536 SmallVector<Value *, 4> Args(CI.arg_operands().begin(), 1537 CI.arg_operands().end()); 1538 Args.pop_back(); 1539 Args.pop_back(); 1540 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID), 1541 Args); 1542 unsigned NumArgs = CI.getNumArgOperands(); 1543 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep, 1544 CI.getArgOperand(NumArgs - 2)); 1545 return true; 1546 } 1547 1548 /// Upgrade comment in call to inline asm that represents an objc retain release 1549 /// marker. 1550 void llvm::UpgradeInlineAsmString(std::string *AsmStr) { 1551 size_t Pos; 1552 if (AsmStr->find("mov\tfp") == 0 && 1553 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos && 1554 (Pos = AsmStr->find("# marker")) != std::string::npos) { 1555 AsmStr->replace(Pos, 1, ";"); 1556 } 1557 return; 1558 } 1559 1560 /// Upgrade a call to an old intrinsic. All argument and return casting must be 1561 /// provided to seamlessly integrate with existing context. 1562 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { 1563 Function *F = CI->getCalledFunction(); 1564 LLVMContext &C = CI->getContext(); 1565 IRBuilder<> Builder(C); 1566 Builder.SetInsertPoint(CI->getParent(), CI->getIterator()); 1567 1568 assert(F && "Intrinsic call is not direct?"); 1569 1570 if (!NewFn) { 1571 // Get the Function's name. 
1572 StringRef Name = F->getName(); 1573 1574 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'"); 1575 Name = Name.substr(5); 1576 1577 bool IsX86 = Name.startswith("x86."); 1578 if (IsX86) 1579 Name = Name.substr(4); 1580 bool IsNVVM = Name.startswith("nvvm."); 1581 if (IsNVVM) 1582 Name = Name.substr(5); 1583 1584 if (IsX86 && Name.startswith("sse4a.movnt.")) { 1585 Module *M = F->getParent(); 1586 SmallVector<Metadata *, 1> Elts; 1587 Elts.push_back( 1588 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1589 MDNode *Node = MDNode::get(C, Elts); 1590 1591 Value *Arg0 = CI->getArgOperand(0); 1592 Value *Arg1 = CI->getArgOperand(1); 1593 1594 // Nontemporal (unaligned) store of the 0'th element of the float/double 1595 // vector. 1596 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType(); 1597 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy); 1598 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast"); 1599 Value *Extract = 1600 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement"); 1601 1602 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1); 1603 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1604 1605 // Remove intrinsic. 1606 CI->eraseFromParent(); 1607 return; 1608 } 1609 1610 if (IsX86 && (Name.startswith("avx.movnt.") || 1611 Name.startswith("avx512.storent."))) { 1612 Module *M = F->getParent(); 1613 SmallVector<Metadata *, 1> Elts; 1614 Elts.push_back( 1615 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1616 MDNode *Node = MDNode::get(C, Elts); 1617 1618 Value *Arg0 = CI->getArgOperand(0); 1619 Value *Arg1 = CI->getArgOperand(1); 1620 1621 // Convert the type of the pointer to a pointer to the stored type. 1622 Value *BC = Builder.CreateBitCast(Arg0, 1623 PointerType::getUnqual(Arg1->getType()), 1624 "cast"); 1625 VectorType *VTy = cast<VectorType>(Arg1->getType()); 1626 StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, 1627 VTy->getBitWidth() / 8); 1628 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1629 1630 // Remove intrinsic. 1631 CI->eraseFromParent(); 1632 return; 1633 } 1634 1635 if (IsX86 && Name == "sse2.storel.dq") { 1636 Value *Arg0 = CI->getArgOperand(0); 1637 Value *Arg1 = CI->getArgOperand(1); 1638 1639 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 1640 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 1641 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0); 1642 Value *BC = Builder.CreateBitCast(Arg0, 1643 PointerType::getUnqual(Elt->getType()), 1644 "cast"); 1645 Builder.CreateAlignedStore(Elt, BC, 1); 1646 1647 // Remove intrinsic. 1648 CI->eraseFromParent(); 1649 return; 1650 } 1651 1652 if (IsX86 && (Name.startswith("sse.storeu.") || 1653 Name.startswith("sse2.storeu.") || 1654 Name.startswith("avx.storeu."))) { 1655 Value *Arg0 = CI->getArgOperand(0); 1656 Value *Arg1 = CI->getArgOperand(1); 1657 1658 Arg0 = Builder.CreateBitCast(Arg0, 1659 PointerType::getUnqual(Arg1->getType()), 1660 "cast"); 1661 Builder.CreateAlignedStore(Arg1, Arg0, 1); 1662 1663 // Remove intrinsic. 1664 CI->eraseFromParent(); 1665 return; 1666 } 1667 1668 if (IsX86 && Name == "avx512.mask.store.ss") { 1669 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1)); 1670 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1), 1671 Mask, false); 1672 1673 // Remove intrinsic. 
1674 CI->eraseFromParent();
1675 return;
1676 }
1677 
1678 if (IsX86 && (Name.startswith("avx512.mask.store"))) {
1679 // "avx512.mask.storeu." or "avx512.mask.store."
1680 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
1681 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
1682 CI->getArgOperand(2), Aligned);
1683 
1684 // Remove intrinsic.
1685 CI->eraseFromParent();
1686 return;
1687 }
1688 
1689 Value *Rep;
1690 // Upgrade packed integer vector compare intrinsics to compare instructions.
1691 if (IsX86 && (Name.startswith("sse2.pcmp") ||
1692 Name.startswith("avx2.pcmp"))) {
1693 // "sse2.pcmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
1694 bool CmpEq = Name[9] == 'e';
1695 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
1696 CI->getArgOperand(0), CI->getArgOperand(1));
1697 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
1698 } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
1699 Type *ExtTy = Type::getInt32Ty(C);
1700 if (CI->getOperand(0)->getType()->isIntegerTy(8))
1701 ExtTy = Type::getInt64Ty(C);
1702 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
1703 ExtTy->getPrimitiveSizeInBits();
1704 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
1705 Rep = Builder.CreateVectorSplat(NumElts, Rep);
1706 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
1707 Name == "sse2.sqrt.sd")) {
1708 Value *Vec = CI->getArgOperand(0);
1709 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
1710 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
1711 Intrinsic::sqrt, Elt0->getType());
1712 Elt0 = Builder.CreateCall(Intr, Elt0);
1713 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
1714 } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
1715 Name.startswith("sse2.sqrt.p") ||
1716 Name.startswith("sse.sqrt.p"))) {
1717 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1718 Intrinsic::sqrt,
1719 CI->getType()),
1720 {CI->getArgOperand(0)});
1721 } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
1722 if (CI->getNumArgOperands() == 4 &&
1723 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
1724 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
1725 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
1726 : Intrinsic::x86_avx512_sqrt_pd_512;
1727 
1728 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
1729 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
1730 IID), Args);
1731 } else {
1732 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1733 Intrinsic::sqrt,
1734 CI->getType()),
1735 {CI->getArgOperand(0)});
1736 }
1737 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1738 CI->getArgOperand(1));
1739 } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
1740 Name.startswith("avx512.ptestnm"))) {
1741 Value *Op0 = CI->getArgOperand(0);
1742 Value *Op1 = CI->getArgOperand(1);
1743 Value *Mask = CI->getArgOperand(2);
1744 Rep = Builder.CreateAnd(Op0, Op1);
1745 llvm::Type *Ty = Op0->getType();
1746 Value *Zero = llvm::Constant::getNullValue(Ty);
1747 ICmpInst::Predicate Pred =
1748 Name.startswith("avx512.ptestm") ?
ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ; 1749 Rep = Builder.CreateICmp(Pred, Rep, Zero); 1750 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask); 1751 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){ 1752 unsigned NumElts = 1753 CI->getArgOperand(1)->getType()->getVectorNumElements(); 1754 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0)); 1755 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1756 CI->getArgOperand(1)); 1757 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) { 1758 unsigned NumElts = CI->getType()->getScalarSizeInBits(); 1759 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts); 1760 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts); 1761 uint32_t Indices[64]; 1762 for (unsigned i = 0; i != NumElts; ++i) 1763 Indices[i] = i; 1764 1765 // First extract half of each vector. This gives better codegen than 1766 // doing it in a single shuffle. 1767 LHS = Builder.CreateShuffleVector(LHS, LHS, 1768 makeArrayRef(Indices, NumElts / 2)); 1769 RHS = Builder.CreateShuffleVector(RHS, RHS, 1770 makeArrayRef(Indices, NumElts / 2)); 1771 // Concat the vectors. 1772 // NOTE: Operands have to be swapped to match intrinsic definition. 1773 Rep = Builder.CreateShuffleVector(RHS, LHS, 1774 makeArrayRef(Indices, NumElts)); 1775 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1776 } else if (IsX86 && Name == "avx512.kand.w") { 1777 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1778 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1779 Rep = Builder.CreateAnd(LHS, RHS); 1780 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1781 } else if (IsX86 && Name == "avx512.kandn.w") { 1782 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1783 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1784 LHS = Builder.CreateNot(LHS); 1785 Rep = Builder.CreateAnd(LHS, RHS); 1786 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1787 } else if (IsX86 && Name == "avx512.kor.w") { 1788 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1789 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1790 Rep = Builder.CreateOr(LHS, RHS); 1791 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1792 } else if (IsX86 && Name == "avx512.kxor.w") { 1793 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1794 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1795 Rep = Builder.CreateXor(LHS, RHS); 1796 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1797 } else if (IsX86 && Name == "avx512.kxnor.w") { 1798 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1799 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1800 LHS = Builder.CreateNot(LHS); 1801 Rep = Builder.CreateXor(LHS, RHS); 1802 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1803 } else if (IsX86 && Name == "avx512.knot.w") { 1804 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1805 Rep = Builder.CreateNot(Rep); 1806 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1807 } else if (IsX86 && 1808 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) { 1809 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1810 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1811 Rep = Builder.CreateOr(LHS, RHS); 1812 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty()); 1813 Value *C; 1814 if (Name[14] == 'c') 1815 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty()); 1816 else 1817 C = ConstantInt::getNullValue(Builder.getInt16Ty()); 1818 Rep = 
Builder.CreateICmpEQ(Rep, C); 1819 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty()); 1820 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" || 1821 Name == "sse.sub.ss" || Name == "sse2.sub.sd" || 1822 Name == "sse.mul.ss" || Name == "sse2.mul.sd" || 1823 Name == "sse.div.ss" || Name == "sse2.div.sd")) { 1824 Type *I32Ty = Type::getInt32Ty(C); 1825 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1826 ConstantInt::get(I32Ty, 0)); 1827 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1828 ConstantInt::get(I32Ty, 0)); 1829 Value *EltOp; 1830 if (Name.contains(".add.")) 1831 EltOp = Builder.CreateFAdd(Elt0, Elt1); 1832 else if (Name.contains(".sub.")) 1833 EltOp = Builder.CreateFSub(Elt0, Elt1); 1834 else if (Name.contains(".mul.")) 1835 EltOp = Builder.CreateFMul(Elt0, Elt1); 1836 else 1837 EltOp = Builder.CreateFDiv(Elt0, Elt1); 1838 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp, 1839 ConstantInt::get(I32Ty, 0)); 1840 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) { 1841 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt." 1842 bool CmpEq = Name[16] == 'e'; 1843 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true); 1844 } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) { 1845 Type *OpTy = CI->getArgOperand(0)->getType(); 1846 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1847 Intrinsic::ID IID; 1848 switch (VecWidth) { 1849 default: llvm_unreachable("Unexpected intrinsic"); 1850 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break; 1851 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break; 1852 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break; 1853 } 1854 1855 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1856 { CI->getOperand(0), CI->getArgOperand(1) }); 1857 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1858 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) { 1859 Type *OpTy = CI->getArgOperand(0)->getType(); 1860 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1861 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1862 Intrinsic::ID IID; 1863 if (VecWidth == 128 && EltWidth == 32) 1864 IID = Intrinsic::x86_avx512_fpclass_ps_128; 1865 else if (VecWidth == 256 && EltWidth == 32) 1866 IID = Intrinsic::x86_avx512_fpclass_ps_256; 1867 else if (VecWidth == 512 && EltWidth == 32) 1868 IID = Intrinsic::x86_avx512_fpclass_ps_512; 1869 else if (VecWidth == 128 && EltWidth == 64) 1870 IID = Intrinsic::x86_avx512_fpclass_pd_128; 1871 else if (VecWidth == 256 && EltWidth == 64) 1872 IID = Intrinsic::x86_avx512_fpclass_pd_256; 1873 else if (VecWidth == 512 && EltWidth == 64) 1874 IID = Intrinsic::x86_avx512_fpclass_pd_512; 1875 else 1876 llvm_unreachable("Unexpected intrinsic"); 1877 1878 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1879 { CI->getOperand(0), CI->getArgOperand(1) }); 1880 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1881 } else if (IsX86 && Name.startswith("avx512.mask.cmp.p")) { 1882 Type *OpTy = CI->getArgOperand(0)->getType(); 1883 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1884 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1885 Intrinsic::ID IID; 1886 if (VecWidth == 128 && EltWidth == 32) 1887 IID = Intrinsic::x86_avx512_cmp_ps_128; 1888 else if (VecWidth == 256 && EltWidth == 32) 1889 IID = Intrinsic::x86_avx512_cmp_ps_256; 1890 else if (VecWidth == 512 && EltWidth == 32) 1891 IID = Intrinsic::x86_avx512_cmp_ps_512; 1892 
else if (VecWidth == 128 && EltWidth == 64) 1893 IID = Intrinsic::x86_avx512_cmp_pd_128; 1894 else if (VecWidth == 256 && EltWidth == 64) 1895 IID = Intrinsic::x86_avx512_cmp_pd_256; 1896 else if (VecWidth == 512 && EltWidth == 64) 1897 IID = Intrinsic::x86_avx512_cmp_pd_512; 1898 else 1899 llvm_unreachable("Unexpected intrinsic"); 1900 1901 SmallVector<Value *, 4> Args; 1902 Args.push_back(CI->getArgOperand(0)); 1903 Args.push_back(CI->getArgOperand(1)); 1904 Args.push_back(CI->getArgOperand(2)); 1905 if (CI->getNumArgOperands() == 5) 1906 Args.push_back(CI->getArgOperand(4)); 1907 1908 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1909 Args); 1910 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(3)); 1911 } else if (IsX86 && Name.startswith("avx512.mask.cmp.") && 1912 Name[16] != 'p') { 1913 // Integer compare intrinsics. 1914 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1915 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true); 1916 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) { 1917 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1918 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false); 1919 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") || 1920 Name.startswith("avx512.cvtw2mask.") || 1921 Name.startswith("avx512.cvtd2mask.") || 1922 Name.startswith("avx512.cvtq2mask."))) { 1923 Value *Op = CI->getArgOperand(0); 1924 Value *Zero = llvm::Constant::getNullValue(Op->getType()); 1925 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero); 1926 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr); 1927 } else if(IsX86 && (Name == "ssse3.pabs.b.128" || 1928 Name == "ssse3.pabs.w.128" || 1929 Name == "ssse3.pabs.d.128" || 1930 Name.startswith("avx2.pabs") || 1931 Name.startswith("avx512.mask.pabs"))) { 1932 Rep = upgradeAbs(Builder, *CI); 1933 } else if (IsX86 && (Name == "sse41.pmaxsb" || 1934 Name == "sse2.pmaxs.w" || 1935 Name == "sse41.pmaxsd" || 1936 Name.startswith("avx2.pmaxs") || 1937 Name.startswith("avx512.mask.pmaxs"))) { 1938 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT); 1939 } else if (IsX86 && (Name == "sse2.pmaxu.b" || 1940 Name == "sse41.pmaxuw" || 1941 Name == "sse41.pmaxud" || 1942 Name.startswith("avx2.pmaxu") || 1943 Name.startswith("avx512.mask.pmaxu"))) { 1944 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT); 1945 } else if (IsX86 && (Name == "sse41.pminsb" || 1946 Name == "sse2.pmins.w" || 1947 Name == "sse41.pminsd" || 1948 Name.startswith("avx2.pmins") || 1949 Name.startswith("avx512.mask.pmins"))) { 1950 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT); 1951 } else if (IsX86 && (Name == "sse2.pminu.b" || 1952 Name == "sse41.pminuw" || 1953 Name == "sse41.pminud" || 1954 Name.startswith("avx2.pminu") || 1955 Name.startswith("avx512.mask.pminu"))) { 1956 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT); 1957 } else if (IsX86 && (Name == "sse2.pmulu.dq" || 1958 Name == "avx2.pmulu.dq" || 1959 Name == "avx512.pmulu.dq.512" || 1960 Name.startswith("avx512.mask.pmulu.dq."))) { 1961 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false); 1962 } else if (IsX86 && (Name == "sse41.pmuldq" || 1963 Name == "avx2.pmul.dq" || 1964 Name == "avx512.pmul.dq.512" || 1965 Name.startswith("avx512.mask.pmul.dq."))) { 1966 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true); 1967 } else if (IsX86 && (Name == "sse.cvtsi2ss" || 1968 Name == "sse2.cvtsi2sd" || 1969 Name == "sse.cvtsi642ss" || 1970 Name == "sse2.cvtsi642sd")) { 1971 Rep = 
Builder.CreateSIToFP(CI->getArgOperand(1), 1972 CI->getType()->getVectorElementType()); 1973 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1974 } else if (IsX86 && Name == "avx512.cvtusi2sd") { 1975 Rep = Builder.CreateUIToFP(CI->getArgOperand(1), 1976 CI->getType()->getVectorElementType()); 1977 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1978 } else if (IsX86 && Name == "sse2.cvtss2sd") { 1979 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0); 1980 Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType()); 1981 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1982 } else if (IsX86 && (Name == "sse2.cvtdq2pd" || 1983 Name == "sse2.cvtdq2ps" || 1984 Name == "avx.cvtdq2.pd.256" || 1985 Name == "avx.cvtdq2.ps.256" || 1986 Name.startswith("avx512.mask.cvtdq2pd.") || 1987 Name.startswith("avx512.mask.cvtudq2pd.") || 1988 Name.startswith("avx512.mask.cvtdq2ps.") || 1989 Name.startswith("avx512.mask.cvtudq2ps.") || 1990 Name.startswith("avx512.mask.cvtqq2pd.") || 1991 Name.startswith("avx512.mask.cvtuqq2pd.") || 1992 Name == "avx512.mask.cvtqq2ps.256" || 1993 Name == "avx512.mask.cvtqq2ps.512" || 1994 Name == "avx512.mask.cvtuqq2ps.256" || 1995 Name == "avx512.mask.cvtuqq2ps.512" || 1996 Name == "sse2.cvtps2pd" || 1997 Name == "avx.cvt.ps2.pd.256" || 1998 Name == "avx512.mask.cvtps2pd.128" || 1999 Name == "avx512.mask.cvtps2pd.256")) { 2000 Type *DstTy = CI->getType(); 2001 Rep = CI->getArgOperand(0); 2002 Type *SrcTy = Rep->getType(); 2003 2004 unsigned NumDstElts = DstTy->getVectorNumElements(); 2005 if (NumDstElts < SrcTy->getVectorNumElements()) { 2006 assert(NumDstElts == 2 && "Unexpected vector size"); 2007 uint32_t ShuffleMask[2] = { 0, 1 }; 2008 Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); 2009 } 2010 2011 bool IsPS2PD = SrcTy->getVectorElementType()->isFloatTy(); 2012 bool IsUnsigned = (StringRef::npos != Name.find("cvtu")); 2013 if (IsPS2PD) 2014 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd"); 2015 else if (CI->getNumArgOperands() == 4 && 2016 (!isa<ConstantInt>(CI->getArgOperand(3)) || 2017 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) { 2018 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round 2019 : Intrinsic::x86_avx512_sitofp_round; 2020 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID, 2021 { DstTy, SrcTy }); 2022 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) }); 2023 } else { 2024 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt") 2025 : Builder.CreateSIToFP(Rep, DstTy, "cvt"); 2026 } 2027 2028 if (CI->getNumArgOperands() >= 3) 2029 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2030 CI->getArgOperand(1)); 2031 } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) { 2032 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 2033 CI->getArgOperand(1), CI->getArgOperand(2), 2034 /*Aligned*/false); 2035 } else if (IsX86 && (Name.startswith("avx512.mask.load."))) { 2036 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 2037 CI->getArgOperand(1),CI->getArgOperand(2), 2038 /*Aligned*/true); 2039 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) { 2040 Type *ResultTy = CI->getType(); 2041 Type *PtrTy = ResultTy->getVectorElementType(); 2042 2043 // Cast the pointer to element type. 
2044 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2045 llvm::PointerType::getUnqual(PtrTy)); 2046 2047 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2048 ResultTy->getVectorNumElements()); 2049 2050 Function *ELd = Intrinsic::getDeclaration(F->getParent(), 2051 Intrinsic::masked_expandload, 2052 ResultTy); 2053 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) }); 2054 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) { 2055 Type *ResultTy = CI->getArgOperand(1)->getType(); 2056 Type *PtrTy = ResultTy->getVectorElementType(); 2057 2058 // Cast the pointer to element type. 2059 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2060 llvm::PointerType::getUnqual(PtrTy)); 2061 2062 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2063 ResultTy->getVectorNumElements()); 2064 2065 Function *CSt = Intrinsic::getDeclaration(F->getParent(), 2066 Intrinsic::masked_compressstore, 2067 ResultTy); 2068 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec }); 2069 } else if (IsX86 && (Name.startswith("avx512.mask.compress.") || 2070 Name.startswith("avx512.mask.expand."))) { 2071 Type *ResultTy = CI->getType(); 2072 2073 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2074 ResultTy->getVectorNumElements()); 2075 2076 bool IsCompress = Name[12] == 'c'; 2077 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress 2078 : Intrinsic::x86_avx512_mask_expand; 2079 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy); 2080 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1), 2081 MaskVec }); 2082 } else if (IsX86 && Name.startswith("xop.vpcom")) { 2083 bool IsSigned; 2084 if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") || 2085 Name.endswith("uq")) 2086 IsSigned = false; 2087 else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") || 2088 Name.endswith("q")) 2089 IsSigned = true; 2090 else 2091 llvm_unreachable("Unknown suffix"); 2092 2093 unsigned Imm; 2094 if (CI->getNumArgOperands() == 3) { 2095 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2096 } else { 2097 Name = Name.substr(9); // strip off "xop.vpcom" 2098 if (Name.startswith("lt")) 2099 Imm = 0; 2100 else if (Name.startswith("le")) 2101 Imm = 1; 2102 else if (Name.startswith("gt")) 2103 Imm = 2; 2104 else if (Name.startswith("ge")) 2105 Imm = 3; 2106 else if (Name.startswith("eq")) 2107 Imm = 4; 2108 else if (Name.startswith("ne")) 2109 Imm = 5; 2110 else if (Name.startswith("false")) 2111 Imm = 6; 2112 else if (Name.startswith("true")) 2113 Imm = 7; 2114 else 2115 llvm_unreachable("Unknown condition"); 2116 } 2117 2118 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned); 2119 } else if (IsX86 && Name.startswith("xop.vpcmov")) { 2120 Value *Sel = CI->getArgOperand(2); 2121 Value *NotSel = Builder.CreateNot(Sel); 2122 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel); 2123 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel); 2124 Rep = Builder.CreateOr(Sel0, Sel1); 2125 } else if (IsX86 && (Name.startswith("xop.vprot") || 2126 Name.startswith("avx512.prol") || 2127 Name.startswith("avx512.mask.prol"))) { 2128 Rep = upgradeX86Rotate(Builder, *CI, false); 2129 } else if (IsX86 && (Name.startswith("avx512.pror") || 2130 Name.startswith("avx512.mask.pror"))) { 2131 Rep = upgradeX86Rotate(Builder, *CI, true); 2132 } else if (IsX86 && (Name.startswith("avx512.vpshld.") || 2133 Name.startswith("avx512.mask.vpshld") || 2134 
Name.startswith("avx512.maskz.vpshld"))) { 2135 bool ZeroMask = Name[11] == 'z'; 2136 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask); 2137 } else if (IsX86 && (Name.startswith("avx512.vpshrd.") || 2138 Name.startswith("avx512.mask.vpshrd") || 2139 Name.startswith("avx512.maskz.vpshrd"))) { 2140 bool ZeroMask = Name[11] == 'z'; 2141 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask); 2142 } else if (IsX86 && Name == "sse42.crc32.64.8") { 2143 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(), 2144 Intrinsic::x86_sse42_crc32_32_8); 2145 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C)); 2146 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)}); 2147 Rep = Builder.CreateZExt(Rep, CI->getType(), ""); 2148 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") || 2149 Name.startswith("avx512.vbroadcast.s"))) { 2150 // Replace broadcasts with a series of insertelements. 2151 Type *VecTy = CI->getType(); 2152 Type *EltTy = VecTy->getVectorElementType(); 2153 unsigned EltNum = VecTy->getVectorNumElements(); 2154 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0), 2155 EltTy->getPointerTo()); 2156 Value *Load = Builder.CreateLoad(EltTy, Cast); 2157 Type *I32Ty = Type::getInt32Ty(C); 2158 Rep = UndefValue::get(VecTy); 2159 for (unsigned I = 0; I < EltNum; ++I) 2160 Rep = Builder.CreateInsertElement(Rep, Load, 2161 ConstantInt::get(I32Ty, I)); 2162 } else if (IsX86 && (Name.startswith("sse41.pmovsx") || 2163 Name.startswith("sse41.pmovzx") || 2164 Name.startswith("avx2.pmovsx") || 2165 Name.startswith("avx2.pmovzx") || 2166 Name.startswith("avx512.mask.pmovsx") || 2167 Name.startswith("avx512.mask.pmovzx"))) { 2168 VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType()); 2169 VectorType *DstTy = cast<VectorType>(CI->getType()); 2170 unsigned NumDstElts = DstTy->getNumElements(); 2171 2172 // Extract a subvector of the first NumDstElts lanes and sign/zero extend. 2173 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 2174 for (unsigned i = 0; i != NumDstElts; ++i) 2175 ShuffleMask[i] = i; 2176 2177 Value *SV = Builder.CreateShuffleVector( 2178 CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask); 2179 2180 bool DoSext = (StringRef::npos != Name.find("pmovsx")); 2181 Rep = DoSext ? Builder.CreateSExt(SV, DstTy) 2182 : Builder.CreateZExt(SV, DstTy); 2183 // If there are 3 arguments, it's a masked intrinsic so we need a select. 2184 if (CI->getNumArgOperands() == 3) 2185 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2186 CI->getArgOperand(1)); 2187 } else if (Name == "avx512.mask.pmov.qd.256" || 2188 Name == "avx512.mask.pmov.qd.512" || 2189 Name == "avx512.mask.pmov.wb.256" || 2190 Name == "avx512.mask.pmov.wb.512") { 2191 Type *Ty = CI->getArgOperand(1)->getType(); 2192 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty); 2193 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2194 CI->getArgOperand(1)); 2195 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") || 2196 Name == "avx2.vbroadcasti128")) { 2197 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle. 
2198 Type *EltTy = CI->getType()->getVectorElementType(); 2199 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits(); 2200 Type *VT = VectorType::get(EltTy, NumSrcElts); 2201 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0), 2202 PointerType::getUnqual(VT)); 2203 Value *Load = Builder.CreateAlignedLoad(VT, Op, 1); 2204 if (NumSrcElts == 2) 2205 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2206 { 0, 1, 0, 1 }); 2207 else 2208 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2209 { 0, 1, 2, 3, 0, 1, 2, 3 }); 2210 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") || 2211 Name.startswith("avx512.mask.shuf.f"))) { 2212 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2213 Type *VT = CI->getType(); 2214 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128; 2215 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits(); 2216 unsigned ControlBitsMask = NumLanes - 1; 2217 unsigned NumControlBits = NumLanes / 2; 2218 SmallVector<uint32_t, 8> ShuffleMask(0); 2219 2220 for (unsigned l = 0; l != NumLanes; ++l) { 2221 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask; 2222 // We actually need the other source. 2223 if (l >= NumLanes / 2) 2224 LaneMask += NumLanes; 2225 for (unsigned i = 0; i != NumElementsInLane; ++i) 2226 ShuffleMask.push_back(LaneMask * NumElementsInLane + i); 2227 } 2228 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 2229 CI->getArgOperand(1), ShuffleMask); 2230 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2231 CI->getArgOperand(3)); 2232 }else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") || 2233 Name.startswith("avx512.mask.broadcasti"))) { 2234 unsigned NumSrcElts = 2235 CI->getArgOperand(0)->getType()->getVectorNumElements(); 2236 unsigned NumDstElts = CI->getType()->getVectorNumElements(); 2237 2238 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 2239 for (unsigned i = 0; i != NumDstElts; ++i) 2240 ShuffleMask[i] = i % NumSrcElts; 2241 2242 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 2243 CI->getArgOperand(0), 2244 ShuffleMask); 2245 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2246 CI->getArgOperand(1)); 2247 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") || 2248 Name.startswith("avx2.vbroadcast") || 2249 Name.startswith("avx512.pbroadcast") || 2250 Name.startswith("avx512.mask.broadcast.s"))) { 2251 // Replace vp?broadcasts with a vector shuffle. 
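// Illustrative example: splatting a source whose element 0 holds the value
// to broadcast into an <8 x i32> result becomes
//   shufflevector %op, undef, <8 x i32> zeroinitializer
// i.e. an all-zero shuffle mask, the canonical splat form that later passes
// and the backend recognize.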
2252 Value *Op = CI->getArgOperand(0); 2253 unsigned NumElts = CI->getType()->getVectorNumElements(); 2254 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts); 2255 Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()), 2256 Constant::getNullValue(MaskTy)); 2257 2258 if (CI->getNumArgOperands() == 3) 2259 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2260 CI->getArgOperand(1)); 2261 } else if (IsX86 && (Name.startswith("sse2.padds.") || 2262 Name.startswith("sse2.psubs.") || 2263 Name.startswith("avx2.padds.") || 2264 Name.startswith("avx2.psubs.") || 2265 Name.startswith("avx512.padds.") || 2266 Name.startswith("avx512.psubs.") || 2267 Name.startswith("avx512.mask.padds.") || 2268 Name.startswith("avx512.mask.psubs."))) { 2269 bool IsAdd = Name.contains(".padds"); 2270 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, true, IsAdd); 2271 } else if (IsX86 && (Name.startswith("sse2.paddus.") || 2272 Name.startswith("sse2.psubus.") || 2273 Name.startswith("avx2.paddus.") || 2274 Name.startswith("avx2.psubus.") || 2275 Name.startswith("avx512.mask.paddus.") || 2276 Name.startswith("avx512.mask.psubus."))) { 2277 bool IsAdd = Name.contains(".paddus"); 2278 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, false, IsAdd); 2279 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) { 2280 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2281 CI->getArgOperand(1), 2282 CI->getArgOperand(2), 2283 CI->getArgOperand(3), 2284 CI->getArgOperand(4), 2285 false); 2286 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) { 2287 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2288 CI->getArgOperand(1), 2289 CI->getArgOperand(2), 2290 CI->getArgOperand(3), 2291 CI->getArgOperand(4), 2292 true); 2293 } else if (IsX86 && (Name == "sse2.psll.dq" || 2294 Name == "avx2.psll.dq")) { 2295 // 128/256-bit shift left specified in bits. 2296 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2297 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), 2298 Shift / 8); // Shift is in bits. 2299 } else if (IsX86 && (Name == "sse2.psrl.dq" || 2300 Name == "avx2.psrl.dq")) { 2301 // 128/256-bit shift right specified in bits. 2302 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2303 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), 2304 Shift / 8); // Shift is in bits. 2305 } else if (IsX86 && (Name == "sse2.psll.dq.bs" || 2306 Name == "avx2.psll.dq.bs" || 2307 Name == "avx512.psll.dq.512")) { 2308 // 128/256/512-bit shift left specified in bytes. 2309 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2310 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift); 2311 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" || 2312 Name == "avx2.psrl.dq.bs" || 2313 Name == "avx512.psrl.dq.512")) { 2314 // 128/256/512-bit shift right specified in bytes. 
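// Worked example of the intended semantics (the helper is assumed to lower
// this to a byte shuffle): sse2.psrl.dq.bs with an immediate of 4 on a
// 128-bit value drops the low four bytes, moves bytes 4..15 down, and fills
// the top four bytes with zeroes, matching the hardware PSRLDQ behaviour.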
2315 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2316 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2317 } else if (IsX86 && (Name == "sse41.pblendw" ||
2318 Name.startswith("sse41.blendp") ||
2319 Name.startswith("avx.blend.p") ||
2320 Name == "avx2.pblendw" ||
2321 Name.startswith("avx2.pblendd."))) {
2322 Value *Op0 = CI->getArgOperand(0);
2323 Value *Op1 = CI->getArgOperand(1);
2324 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2325 VectorType *VecTy = cast<VectorType>(CI->getType());
2326 unsigned NumElts = VecTy->getNumElements();
2327 
2328 SmallVector<uint32_t, 16> Idxs(NumElts);
2329 for (unsigned i = 0; i != NumElts; ++i)
2330 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
2331 
2332 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2333 } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
2334 Name == "avx2.vinserti128" ||
2335 Name.startswith("avx512.mask.insert"))) {
2336 Value *Op0 = CI->getArgOperand(0);
2337 Value *Op1 = CI->getArgOperand(1);
2338 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2339 unsigned DstNumElts = CI->getType()->getVectorNumElements();
2340 unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
2341 unsigned Scale = DstNumElts / SrcNumElts;
2342 
2343 // Mask off the high bits of the immediate value; hardware ignores those.
2344 Imm = Imm % Scale;
2345 
2346 // Extend the second operand into a vector the size of the destination.
2347 Value *UndefV = UndefValue::get(Op1->getType());
2348 SmallVector<uint32_t, 8> Idxs(DstNumElts);
2349 for (unsigned i = 0; i != SrcNumElts; ++i)
2350 Idxs[i] = i;
2351 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2352 Idxs[i] = SrcNumElts;
2353 Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);
2354 
2355 // Insert the second operand into the first operand.
2356 
2357 // Note that there is no guarantee that instruction lowering will actually
2358 // produce a vinsertf128 instruction for the created shuffles. In
2359 // particular, the 0 immediate case involves no lane changes, so it can
2360 // be handled as a blend.
2361 
2362 // Example of shuffle mask for 32-bit elements:
2363 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2364 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
2365 
2366 // First fill with identity mask.
2367 for (unsigned i = 0; i != DstNumElts; ++i)
2368 Idxs[i] = i;
2369 // Then replace the elements where we need to insert.
2370 for (unsigned i = 0; i != SrcNumElts; ++i)
2371 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2372 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2373 
2374 // If the intrinsic has a mask operand, handle that.
2375 if (CI->getNumArgOperands() == 5)
2376 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2377 CI->getArgOperand(3));
2378 } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
2379 Name == "avx2.vextracti128" ||
2380 Name.startswith("avx512.mask.vextract"))) {
2381 Value *Op0 = CI->getArgOperand(0);
2382 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2383 unsigned DstNumElts = CI->getType()->getVectorNumElements();
2384 unsigned SrcNumElts = Op0->getType()->getVectorNumElements();
2385 unsigned Scale = SrcNumElts / DstNumElts;
2386 
2387 // Mask off the high bits of the immediate value; hardware ignores those.
2388 Imm = Imm % Scale;
2389 
2390 // Get indexes for the subvector of the input vector.
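// Worked example: extracting the upper 128 bits of an <8 x i32> source
// (Imm = 1, DstNumElts = 4) yields the index set {4, 5, 6, 7}, i.e.
//   shufflevector %op0, %op0, <4 x i32> <i32 4, i32 5, i32 6, i32 7>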
2391 SmallVector<uint32_t, 8> Idxs(DstNumElts); 2392 for (unsigned i = 0; i != DstNumElts; ++i) { 2393 Idxs[i] = i + (Imm * DstNumElts); 2394 } 2395 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2396 2397 // If the intrinsic has a mask operand, handle that. 2398 if (CI->getNumArgOperands() == 4) 2399 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2400 CI->getArgOperand(2)); 2401 } else if (!IsX86 && Name == "stackprotectorcheck") { 2402 Rep = nullptr; 2403 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") || 2404 Name.startswith("avx512.mask.perm.di."))) { 2405 Value *Op0 = CI->getArgOperand(0); 2406 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2407 VectorType *VecTy = cast<VectorType>(CI->getType()); 2408 unsigned NumElts = VecTy->getNumElements(); 2409 2410 SmallVector<uint32_t, 8> Idxs(NumElts); 2411 for (unsigned i = 0; i != NumElts; ++i) 2412 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3); 2413 2414 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2415 2416 if (CI->getNumArgOperands() == 4) 2417 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2418 CI->getArgOperand(2)); 2419 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") || 2420 Name == "avx2.vperm2i128")) { 2421 // The immediate permute control byte looks like this: 2422 // [1:0] - select 128 bits from sources for low half of destination 2423 // [2] - ignore 2424 // [3] - zero low half of destination 2425 // [5:4] - select 128 bits from sources for high half of destination 2426 // [6] - ignore 2427 // [7] - zero high half of destination 2428 2429 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2430 2431 unsigned NumElts = CI->getType()->getVectorNumElements(); 2432 unsigned HalfSize = NumElts / 2; 2433 SmallVector<uint32_t, 8> ShuffleMask(NumElts); 2434 2435 // Determine which operand(s) are actually in use for this instruction. 2436 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2437 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2438 2439 // If needed, replace operands based on zero mask. 2440 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0; 2441 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1; 2442 2443 // Permute low half of result. 2444 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0; 2445 for (unsigned i = 0; i < HalfSize; ++i) 2446 ShuffleMask[i] = StartIndex + i; 2447 2448 // Permute high half of result. 2449 StartIndex = (Imm & 0x10) ? HalfSize : 0; 2450 for (unsigned i = 0; i < HalfSize; ++i) 2451 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i; 2452 2453 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2454 2455 } else if (IsX86 && (Name.startswith("avx.vpermil.") || 2456 Name == "sse2.pshuf.d" || 2457 Name.startswith("avx512.mask.vpermil.p") || 2458 Name.startswith("avx512.mask.pshuf.d."))) { 2459 Value *Op0 = CI->getArgOperand(0); 2460 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2461 VectorType *VecTy = cast<VectorType>(CI->getType()); 2462 unsigned NumElts = VecTy->getNumElements(); 2463 // Calculate the size of each index in the immediate. 2464 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits(); 2465 unsigned IdxMask = ((1 << IdxSize) - 1); 2466 2467 SmallVector<uint32_t, 8> Idxs(NumElts); 2468 // Lookup the bits for this element, wrapping around the immediate every 2469 // 8-bits. 
Elements are grouped into sets of 2 or 4 elements so we need
2470 // to offset by the first index of each group.
2471 for (unsigned i = 0; i != NumElts; ++i)
2472 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
2473 
2474 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2475 
2476 if (CI->getNumArgOperands() == 4)
2477 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2478 CI->getArgOperand(2));
2479 } else if (IsX86 && (Name == "sse2.pshufl.w" ||
2480 Name.startswith("avx512.mask.pshufl.w."))) {
2481 Value *Op0 = CI->getArgOperand(0);
2482 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2483 unsigned NumElts = CI->getType()->getVectorNumElements();
2484 
2485 SmallVector<uint32_t, 16> Idxs(NumElts);
2486 for (unsigned l = 0; l != NumElts; l += 8) {
2487 for (unsigned i = 0; i != 4; ++i)
2488 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
2489 for (unsigned i = 4; i != 8; ++i)
2490 Idxs[i + l] = i + l;
2491 }
2492 
2493 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2494 
2495 if (CI->getNumArgOperands() == 4)
2496 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2497 CI->getArgOperand(2));
2498 } else if (IsX86 && (Name == "sse2.pshufh.w" ||
2499 Name.startswith("avx512.mask.pshufh.w."))) {
2500 Value *Op0 = CI->getArgOperand(0);
2501 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2502 unsigned NumElts = CI->getType()->getVectorNumElements();
2503 
2504 SmallVector<uint32_t, 16> Idxs(NumElts);
2505 for (unsigned l = 0; l != NumElts; l += 8) {
2506 for (unsigned i = 0; i != 4; ++i)
2507 Idxs[i + l] = i + l;
2508 for (unsigned i = 0; i != 4; ++i)
2509 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
2510 }
2511 
2512 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2513 
2514 if (CI->getNumArgOperands() == 4)
2515 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2516 CI->getArgOperand(2));
2517 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
2518 Value *Op0 = CI->getArgOperand(0);
2519 Value *Op1 = CI->getArgOperand(1);
2520 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2521 unsigned NumElts = CI->getType()->getVectorNumElements();
2522 
2523 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2524 unsigned HalfLaneElts = NumLaneElts / 2;
2525 
2526 SmallVector<uint32_t, 16> Idxs(NumElts);
2527 for (unsigned i = 0; i != NumElts; ++i) {
2528 // Base index is the starting element of the lane.
2529 Idxs[i] = i - (i % NumLaneElts);
2530 // If we are half way through the lane, switch to the other source.
2531 if ((i % NumLaneElts) >= HalfLaneElts)
2532 Idxs[i] += NumElts;
2533 // Now select the specific element by adding HalfLaneElts bits from
2534 // the immediate, wrapping around the immediate every 8 bits.
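// Worked example: a <4 x float> shuffle (NumLaneElts = 4, HalfLaneElts = 2)
// with Imm = 0xB1 (0b10110001) selects Op0 elements 1 and 0 for lanes 0-1 and
// Op1 elements 3 and 2 for lanes 2-3, giving the final mask <1, 0, 7, 6>.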
2535 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1); 2536 } 2537 2538 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2539 2540 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2541 CI->getArgOperand(3)); 2542 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") || 2543 Name.startswith("avx512.mask.movshdup") || 2544 Name.startswith("avx512.mask.movsldup"))) { 2545 Value *Op0 = CI->getArgOperand(0); 2546 unsigned NumElts = CI->getType()->getVectorNumElements(); 2547 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2548 2549 unsigned Offset = 0; 2550 if (Name.startswith("avx512.mask.movshdup.")) 2551 Offset = 1; 2552 2553 SmallVector<uint32_t, 16> Idxs(NumElts); 2554 for (unsigned l = 0; l != NumElts; l += NumLaneElts) 2555 for (unsigned i = 0; i != NumLaneElts; i += 2) { 2556 Idxs[i + l + 0] = i + l + Offset; 2557 Idxs[i + l + 1] = i + l + Offset; 2558 } 2559 2560 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2561 2562 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2563 CI->getArgOperand(1)); 2564 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") || 2565 Name.startswith("avx512.mask.unpckl."))) { 2566 Value *Op0 = CI->getArgOperand(0); 2567 Value *Op1 = CI->getArgOperand(1); 2568 int NumElts = CI->getType()->getVectorNumElements(); 2569 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2570 2571 SmallVector<uint32_t, 64> Idxs(NumElts); 2572 for (int l = 0; l != NumElts; l += NumLaneElts) 2573 for (int i = 0; i != NumLaneElts; ++i) 2574 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2); 2575 2576 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2577 2578 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2579 CI->getArgOperand(2)); 2580 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") || 2581 Name.startswith("avx512.mask.unpckh."))) { 2582 Value *Op0 = CI->getArgOperand(0); 2583 Value *Op1 = CI->getArgOperand(1); 2584 int NumElts = CI->getType()->getVectorNumElements(); 2585 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2586 2587 SmallVector<uint32_t, 64> Idxs(NumElts); 2588 for (int l = 0; l != NumElts; l += NumLaneElts) 2589 for (int i = 0; i != NumLaneElts; ++i) 2590 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2); 2591 2592 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2593 2594 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2595 CI->getArgOperand(2)); 2596 } else if (IsX86 && (Name.startswith("avx512.mask.and.") || 2597 Name.startswith("avx512.mask.pand."))) { 2598 VectorType *FTy = cast<VectorType>(CI->getType()); 2599 VectorType *ITy = VectorType::getInteger(FTy); 2600 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2601 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2602 Rep = Builder.CreateBitCast(Rep, FTy); 2603 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2604 CI->getArgOperand(2)); 2605 } else if (IsX86 && (Name.startswith("avx512.mask.andn.") || 2606 Name.startswith("avx512.mask.pandn."))) { 2607 VectorType *FTy = cast<VectorType>(CI->getType()); 2608 VectorType *ITy = VectorType::getInteger(FTy); 2609 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy)); 2610 Rep = Builder.CreateAnd(Rep, 2611 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2612 Rep = Builder.CreateBitCast(Rep, FTy); 2613 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2614 CI->getArgOperand(2)); 2615 } else if (IsX86 && (Name.startswith("avx512.mask.or.") || 2616 
Name.startswith("avx512.mask.por."))) { 2617 VectorType *FTy = cast<VectorType>(CI->getType()); 2618 VectorType *ITy = VectorType::getInteger(FTy); 2619 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2620 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2621 Rep = Builder.CreateBitCast(Rep, FTy); 2622 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2623 CI->getArgOperand(2)); 2624 } else if (IsX86 && (Name.startswith("avx512.mask.xor.") || 2625 Name.startswith("avx512.mask.pxor."))) { 2626 VectorType *FTy = cast<VectorType>(CI->getType()); 2627 VectorType *ITy = VectorType::getInteger(FTy); 2628 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2629 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2630 Rep = Builder.CreateBitCast(Rep, FTy); 2631 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2632 CI->getArgOperand(2)); 2633 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) { 2634 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2635 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2636 CI->getArgOperand(2)); 2637 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) { 2638 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2639 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2640 CI->getArgOperand(2)); 2641 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) { 2642 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2643 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2644 CI->getArgOperand(2)); 2645 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) { 2646 if (Name.endswith(".512")) { 2647 Intrinsic::ID IID; 2648 if (Name[17] == 's') 2649 IID = Intrinsic::x86_avx512_add_ps_512; 2650 else 2651 IID = Intrinsic::x86_avx512_add_pd_512; 2652 2653 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2654 { CI->getArgOperand(0), CI->getArgOperand(1), 2655 CI->getArgOperand(4) }); 2656 } else { 2657 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2658 } 2659 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2660 CI->getArgOperand(2)); 2661 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) { 2662 if (Name.endswith(".512")) { 2663 Intrinsic::ID IID; 2664 if (Name[17] == 's') 2665 IID = Intrinsic::x86_avx512_div_ps_512; 2666 else 2667 IID = Intrinsic::x86_avx512_div_pd_512; 2668 2669 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2670 { CI->getArgOperand(0), CI->getArgOperand(1), 2671 CI->getArgOperand(4) }); 2672 } else { 2673 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1)); 2674 } 2675 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2676 CI->getArgOperand(2)); 2677 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) { 2678 if (Name.endswith(".512")) { 2679 Intrinsic::ID IID; 2680 if (Name[17] == 's') 2681 IID = Intrinsic::x86_avx512_mul_ps_512; 2682 else 2683 IID = Intrinsic::x86_avx512_mul_pd_512; 2684 2685 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2686 { CI->getArgOperand(0), CI->getArgOperand(1), 2687 CI->getArgOperand(4) }); 2688 } else { 2689 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2690 } 2691 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2692 CI->getArgOperand(2)); 2693 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) { 2694 if (Name.endswith(".512")) { 2695 Intrinsic::ID IID; 2696 if (Name[17] == 's') 2697 IID = 
Intrinsic::x86_avx512_sub_ps_512; 2698 else 2699 IID = Intrinsic::x86_avx512_sub_pd_512; 2700 2701 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2702 { CI->getArgOperand(0), CI->getArgOperand(1), 2703 CI->getArgOperand(4) }); 2704 } else { 2705 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2706 } 2707 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2708 CI->getArgOperand(2)); 2709 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") || 2710 Name.startswith("avx512.mask.min.p")) && 2711 Name.drop_front(18) == ".512") { 2712 bool IsDouble = Name[17] == 'd'; 2713 bool IsMin = Name[13] == 'i'; 2714 static const Intrinsic::ID MinMaxTbl[2][2] = { 2715 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 }, 2716 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 } 2717 }; 2718 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble]; 2719 2720 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2721 { CI->getArgOperand(0), CI->getArgOperand(1), 2722 CI->getArgOperand(4) }); 2723 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2724 CI->getArgOperand(2)); 2725 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) { 2726 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 2727 Intrinsic::ctlz, 2728 CI->getType()), 2729 { CI->getArgOperand(0), Builder.getInt1(false) }); 2730 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2731 CI->getArgOperand(1)); 2732 } else if (IsX86 && Name.startswith("avx512.mask.psll")) { 2733 bool IsImmediate = Name[16] == 'i' || 2734 (Name.size() > 18 && Name[18] == 'i'); 2735 bool IsVariable = Name[16] == 'v'; 2736 char Size = Name[16] == '.' ? Name[17] : 2737 Name[17] == '.' ? Name[18] : 2738 Name[18] == '.' ? Name[19] : 2739 Name[20]; 2740 2741 Intrinsic::ID IID; 2742 if (IsVariable && Name[17] != '.') { 2743 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di 2744 IID = Intrinsic::x86_avx2_psllv_q; 2745 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di 2746 IID = Intrinsic::x86_avx2_psllv_q_256; 2747 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si 2748 IID = Intrinsic::x86_avx2_psllv_d; 2749 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si 2750 IID = Intrinsic::x86_avx2_psllv_d_256; 2751 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi 2752 IID = Intrinsic::x86_avx512_psllv_w_128; 2753 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi 2754 IID = Intrinsic::x86_avx512_psllv_w_256; 2755 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi 2756 IID = Intrinsic::x86_avx512_psllv_w_512; 2757 else 2758 llvm_unreachable("Unexpected size"); 2759 } else if (Name.endswith(".128")) { 2760 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128 2761 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d 2762 : Intrinsic::x86_sse2_psll_d; 2763 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128 2764 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q 2765 : Intrinsic::x86_sse2_psll_q; 2766 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128 2767 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w 2768 : Intrinsic::x86_sse2_psll_w; 2769 else 2770 llvm_unreachable("Unexpected size"); 2771 } else if (Name.endswith(".256")) { 2772 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256 2773 IID = IsImmediate ? 
Intrinsic::x86_avx2_pslli_d 2774 : Intrinsic::x86_avx2_psll_d; 2775 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256 2776 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q 2777 : Intrinsic::x86_avx2_psll_q; 2778 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256 2779 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w 2780 : Intrinsic::x86_avx2_psll_w; 2781 else 2782 llvm_unreachable("Unexpected size"); 2783 } else { 2784 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512 2785 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 : 2786 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 : 2787 Intrinsic::x86_avx512_psll_d_512; 2788 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512 2789 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 : 2790 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 : 2791 Intrinsic::x86_avx512_psll_q_512; 2792 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w 2793 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512 2794 : Intrinsic::x86_avx512_psll_w_512; 2795 else 2796 llvm_unreachable("Unexpected size"); 2797 } 2798 2799 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2800 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) { 2801 bool IsImmediate = Name[16] == 'i' || 2802 (Name.size() > 18 && Name[18] == 'i'); 2803 bool IsVariable = Name[16] == 'v'; 2804 char Size = Name[16] == '.' ? Name[17] : 2805 Name[17] == '.' ? Name[18] : 2806 Name[18] == '.' ? Name[19] : 2807 Name[20]; 2808 2809 Intrinsic::ID IID; 2810 if (IsVariable && Name[17] != '.') { 2811 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di 2812 IID = Intrinsic::x86_avx2_psrlv_q; 2813 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di 2814 IID = Intrinsic::x86_avx2_psrlv_q_256; 2815 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si 2816 IID = Intrinsic::x86_avx2_psrlv_d; 2817 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si 2818 IID = Intrinsic::x86_avx2_psrlv_d_256; 2819 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi 2820 IID = Intrinsic::x86_avx512_psrlv_w_128; 2821 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi 2822 IID = Intrinsic::x86_avx512_psrlv_w_256; 2823 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi 2824 IID = Intrinsic::x86_avx512_psrlv_w_512; 2825 else 2826 llvm_unreachable("Unexpected size"); 2827 } else if (Name.endswith(".128")) { 2828 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128 2829 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d 2830 : Intrinsic::x86_sse2_psrl_d; 2831 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128 2832 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q 2833 : Intrinsic::x86_sse2_psrl_q; 2834 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128 2835 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w 2836 : Intrinsic::x86_sse2_psrl_w; 2837 else 2838 llvm_unreachable("Unexpected size"); 2839 } else if (Name.endswith(".256")) { 2840 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256 2841 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d 2842 : Intrinsic::x86_avx2_psrl_d; 2843 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256 2844 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q 2845 : Intrinsic::x86_avx2_psrl_q; 2846 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256 2847 IID = IsImmediate ? 
Intrinsic::x86_avx2_psrli_w 2848 : Intrinsic::x86_avx2_psrl_w; 2849 else 2850 llvm_unreachable("Unexpected size"); 2851 } else { 2852 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512 2853 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 : 2854 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 : 2855 Intrinsic::x86_avx512_psrl_d_512; 2856 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512 2857 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 : 2858 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 : 2859 Intrinsic::x86_avx512_psrl_q_512; 2860 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w) 2861 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512 2862 : Intrinsic::x86_avx512_psrl_w_512; 2863 else 2864 llvm_unreachable("Unexpected size"); 2865 } 2866 2867 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2868 } else if (IsX86 && Name.startswith("avx512.mask.psra")) { 2869 bool IsImmediate = Name[16] == 'i' || 2870 (Name.size() > 18 && Name[18] == 'i'); 2871 bool IsVariable = Name[16] == 'v'; 2872 char Size = Name[16] == '.' ? Name[17] : 2873 Name[17] == '.' ? Name[18] : 2874 Name[18] == '.' ? Name[19] : 2875 Name[20]; 2876 2877 Intrinsic::ID IID; 2878 if (IsVariable && Name[17] != '.') { 2879 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si 2880 IID = Intrinsic::x86_avx2_psrav_d; 2881 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si 2882 IID = Intrinsic::x86_avx2_psrav_d_256; 2883 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi 2884 IID = Intrinsic::x86_avx512_psrav_w_128; 2885 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi 2886 IID = Intrinsic::x86_avx512_psrav_w_256; 2887 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi 2888 IID = Intrinsic::x86_avx512_psrav_w_512; 2889 else 2890 llvm_unreachable("Unexpected size"); 2891 } else if (Name.endswith(".128")) { 2892 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128 2893 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d 2894 : Intrinsic::x86_sse2_psra_d; 2895 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128 2896 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 : 2897 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 : 2898 Intrinsic::x86_avx512_psra_q_128; 2899 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128 2900 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w 2901 : Intrinsic::x86_sse2_psra_w; 2902 else 2903 llvm_unreachable("Unexpected size"); 2904 } else if (Name.endswith(".256")) { 2905 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256 2906 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d 2907 : Intrinsic::x86_avx2_psra_d; 2908 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256 2909 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 : 2910 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 : 2911 Intrinsic::x86_avx512_psra_q_256; 2912 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256 2913 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w 2914 : Intrinsic::x86_avx2_psra_w; 2915 else 2916 llvm_unreachable("Unexpected size"); 2917 } else { 2918 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512 2919 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 : 2920 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 : 2921 Intrinsic::x86_avx512_psra_d_512; 2922 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q 2923 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 : 2924 IsVariable ? 
Intrinsic::x86_avx512_psrav_q_512 : 2925 Intrinsic::x86_avx512_psra_q_512; 2926 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w 2927 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512 2928 : Intrinsic::x86_avx512_psra_w_512; 2929 else 2930 llvm_unreachable("Unexpected size"); 2931 } 2932 2933 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2934 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) { 2935 Rep = upgradeMaskedMove(Builder, *CI); 2936 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) { 2937 Rep = UpgradeMaskToInt(Builder, *CI); 2938 } else if (IsX86 && Name.endswith(".movntdqa")) { 2939 Module *M = F->getParent(); 2940 MDNode *Node = MDNode::get( 2941 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 2942 2943 Value *Ptr = CI->getArgOperand(0); 2944 VectorType *VTy = cast<VectorType>(CI->getType()); 2945 2946 // Convert the type of the pointer to a pointer to the stored type. 2947 Value *BC = 2948 Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast"); 2949 LoadInst *LI = Builder.CreateAlignedLoad(VTy, BC, VTy->getBitWidth() / 8); 2950 LI->setMetadata(M->getMDKindID("nontemporal"), Node); 2951 Rep = LI; 2952 } else if (IsX86 && 2953 (Name.startswith("sse2.pavg") || Name.startswith("avx2.pavg") || 2954 Name.startswith("avx512.mask.pavg"))) { 2955 // llvm.x86.sse2.pavg.b/w, llvm.x86.avx2.pavg.b/w, 2956 // llvm.x86.avx512.mask.pavg.b/w 2957 Value *A = CI->getArgOperand(0); 2958 Value *B = CI->getArgOperand(1); 2959 VectorType *ZextType = VectorType::getExtendedElementVectorType( 2960 cast<VectorType>(A->getType())); 2961 Value *ExtendedA = Builder.CreateZExt(A, ZextType); 2962 Value *ExtendedB = Builder.CreateZExt(B, ZextType); 2963 Value *Sum = Builder.CreateAdd(ExtendedA, ExtendedB); 2964 Value *AddOne = Builder.CreateAdd(Sum, ConstantInt::get(ZextType, 1)); 2965 Value *ShiftR = Builder.CreateLShr(AddOne, ConstantInt::get(ZextType, 1)); 2966 Rep = Builder.CreateTrunc(ShiftR, A->getType()); 2967 if (CI->getNumArgOperands() > 2) { 2968 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2969 CI->getArgOperand(2)); 2970 } 2971 } else if (IsX86 && (Name.startswith("fma.vfmadd.") || 2972 Name.startswith("fma.vfmsub.") || 2973 Name.startswith("fma.vfnmadd.") || 2974 Name.startswith("fma.vfnmsub."))) { 2975 bool NegMul = Name[6] == 'n'; 2976 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's'; 2977 bool IsScalar = NegMul ? 
Name[12] == 's' : Name[11] == 's'; 2978 2979 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 2980 CI->getArgOperand(2) }; 2981 2982 if (IsScalar) { 2983 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 2984 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 2985 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 2986 } 2987 2988 if (NegMul && !IsScalar) 2989 Ops[0] = Builder.CreateFNeg(Ops[0]); 2990 if (NegMul && IsScalar) 2991 Ops[1] = Builder.CreateFNeg(Ops[1]); 2992 if (NegAcc) 2993 Ops[2] = Builder.CreateFNeg(Ops[2]); 2994 2995 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 2996 Intrinsic::fma, 2997 Ops[0]->getType()), 2998 Ops); 2999 3000 if (IsScalar) 3001 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, 3002 (uint64_t)0); 3003 } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) { 3004 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3005 CI->getArgOperand(2) }; 3006 3007 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 3008 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 3009 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 3010 3011 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 3012 Intrinsic::fma, 3013 Ops[0]->getType()), 3014 Ops); 3015 3016 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()), 3017 Rep, (uint64_t)0); 3018 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") || 3019 Name.startswith("avx512.maskz.vfmadd.s") || 3020 Name.startswith("avx512.mask3.vfmadd.s") || 3021 Name.startswith("avx512.mask3.vfmsub.s") || 3022 Name.startswith("avx512.mask3.vfnmsub.s"))) { 3023 bool IsMask3 = Name[11] == '3'; 3024 bool IsMaskZ = Name[11] == 'z'; 3025 // Drop the "avx512.mask." to make it easier. 3026 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 3027 bool NegMul = Name[2] == 'n'; 3028 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's'; 3029 3030 Value *A = CI->getArgOperand(0); 3031 Value *B = CI->getArgOperand(1); 3032 Value *C = CI->getArgOperand(2); 3033 3034 if (NegMul && (IsMask3 || IsMaskZ)) 3035 A = Builder.CreateFNeg(A); 3036 if (NegMul && !(IsMask3 || IsMaskZ)) 3037 B = Builder.CreateFNeg(B); 3038 if (NegAcc) 3039 C = Builder.CreateFNeg(C); 3040 3041 A = Builder.CreateExtractElement(A, (uint64_t)0); 3042 B = Builder.CreateExtractElement(B, (uint64_t)0); 3043 C = Builder.CreateExtractElement(C, (uint64_t)0); 3044 3045 if (!isa<ConstantInt>(CI->getArgOperand(4)) || 3046 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) { 3047 Value *Ops[] = { A, B, C, CI->getArgOperand(4) }; 3048 3049 Intrinsic::ID IID; 3050 if (Name.back() == 'd') 3051 IID = Intrinsic::x86_avx512_vfmadd_f64; 3052 else 3053 IID = Intrinsic::x86_avx512_vfmadd_f32; 3054 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID); 3055 Rep = Builder.CreateCall(FMA, Ops); 3056 } else { 3057 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 3058 Intrinsic::fma, 3059 A->getType()); 3060 Rep = Builder.CreateCall(FMA, { A, B, C }); 3061 } 3062 3063 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) : 3064 IsMask3 ? C : A; 3065 3066 // For Mask3 with NegAcc, we need to create a new extractelement that 3067 // avoids the negation above. 
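    // (Operand 2 was FNeg'd above to implement the fmsub/fnmsub forms, but the
    // mask3 passthrough lane must be the original, un-negated accumulator, so
    // re-extract element 0 directly from the call operand.)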
3068 if (NegAcc && IsMask3) 3069 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2), 3070 (uint64_t)0); 3071 3072 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3), 3073 Rep, PassThru); 3074 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0), 3075 Rep, (uint64_t)0); 3076 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") || 3077 Name.startswith("avx512.mask.vfnmadd.p") || 3078 Name.startswith("avx512.mask.vfnmsub.p") || 3079 Name.startswith("avx512.mask3.vfmadd.p") || 3080 Name.startswith("avx512.mask3.vfmsub.p") || 3081 Name.startswith("avx512.mask3.vfnmsub.p") || 3082 Name.startswith("avx512.maskz.vfmadd.p"))) { 3083 bool IsMask3 = Name[11] == '3'; 3084 bool IsMaskZ = Name[11] == 'z'; 3085 // Drop the "avx512.mask." to make it easier. 3086 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 3087 bool NegMul = Name[2] == 'n'; 3088 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's'; 3089 3090 Value *A = CI->getArgOperand(0); 3091 Value *B = CI->getArgOperand(1); 3092 Value *C = CI->getArgOperand(2); 3093 3094 if (NegMul && (IsMask3 || IsMaskZ)) 3095 A = Builder.CreateFNeg(A); 3096 if (NegMul && !(IsMask3 || IsMaskZ)) 3097 B = Builder.CreateFNeg(B); 3098 if (NegAcc) 3099 C = Builder.CreateFNeg(C); 3100 3101 if (CI->getNumArgOperands() == 5 && 3102 (!isa<ConstantInt>(CI->getArgOperand(4)) || 3103 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) { 3104 Intrinsic::ID IID; 3105 // Check the character before ".512" in string. 3106 if (Name[Name.size()-5] == 's') 3107 IID = Intrinsic::x86_avx512_vfmadd_ps_512; 3108 else 3109 IID = Intrinsic::x86_avx512_vfmadd_pd_512; 3110 3111 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 3112 { A, B, C, CI->getArgOperand(4) }); 3113 } else { 3114 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 3115 Intrinsic::fma, 3116 A->getType()); 3117 Rep = Builder.CreateCall(FMA, { A, B, C }); 3118 } 3119 3120 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) : 3121 IsMask3 ? CI->getArgOperand(2) : 3122 CI->getArgOperand(0); 3123 3124 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3125 } else if (IsX86 && (Name.startswith("fma.vfmaddsub.p") || 3126 Name.startswith("fma.vfmsubadd.p"))) { 3127 bool IsSubAdd = Name[7] == 's'; 3128 int NumElts = CI->getType()->getVectorNumElements(); 3129 3130 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3131 CI->getArgOperand(2) }; 3132 3133 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma, 3134 Ops[0]->getType()); 3135 Value *Odd = Builder.CreateCall(FMA, Ops); 3136 Ops[2] = Builder.CreateFNeg(Ops[2]); 3137 Value *Even = Builder.CreateCall(FMA, Ops); 3138 3139 if (IsSubAdd) 3140 std::swap(Even, Odd); 3141 3142 SmallVector<uint32_t, 32> Idxs(NumElts); 3143 for (int i = 0; i != NumElts; ++i) 3144 Idxs[i] = i + (i % 2) * NumElts; 3145 3146 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs); 3147 } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") || 3148 Name.startswith("avx512.mask3.vfmaddsub.p") || 3149 Name.startswith("avx512.maskz.vfmaddsub.p") || 3150 Name.startswith("avx512.mask3.vfmsubadd.p"))) { 3151 bool IsMask3 = Name[11] == '3'; 3152 bool IsMaskZ = Name[11] == 'z'; 3153 // Drop the "avx512.mask." to make it easier. 3154 Name = Name.drop_front(IsMask3 || IsMaskZ ? 
                                13 : 12);
    bool IsSubAdd = Name[3] == 's';
    if (CI->getNumArgOperands() == 5 &&
        (!isa<ConstantInt>(CI->getArgOperand(4)) ||
         cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
      Intrinsic::ID IID;
      // Check the character before ".512" in string.
      if (Name[Name.size()-5] == 's')
        IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
      else
        IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;

      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2), CI->getArgOperand(4) };
      if (IsSubAdd)
        Ops[2] = Builder.CreateFNeg(Ops[2]);

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                               Ops);
    } else {
      int NumElts = CI->getType()->getVectorNumElements();

      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2) };

      Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
                                                Ops[0]->getType());
      Value *Odd = Builder.CreateCall(FMA, Ops);
      Ops[2] = Builder.CreateFNeg(Ops[2]);
      Value *Even = Builder.CreateCall(FMA, Ops);

      if (IsSubAdd)
        std::swap(Even, Odd);

      SmallVector<uint32_t, 32> Idxs(NumElts);
      for (int i = 0; i != NumElts; ++i)
        Idxs[i] = i + (i % 2) * NumElts;

      Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
    }

    Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
                      IsMask3 ? CI->getArgOperand(2) :
                                CI->getArgOperand(0);

    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
  } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
                       Name.startswith("avx512.maskz.pternlog."))) {
    bool ZeroMask = Name[11] == 'z';
    unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
    unsigned EltWidth = CI->getType()->getScalarSizeInBits();
    Intrinsic::ID IID;
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pternlog_d_128;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pternlog_d_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pternlog_d_512;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pternlog_q_128;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pternlog_q_256;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pternlog_q_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                      CI->getArgOperand(2), CI->getArgOperand(3) };
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                             Args);
    Value *PassThru = ZeroMask ?
ConstantAggregateZero::get(CI->getType()) 3227 : CI->getArgOperand(0); 3228 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru); 3229 } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") || 3230 Name.startswith("avx512.maskz.vpmadd52"))) { 3231 bool ZeroMask = Name[11] == 'z'; 3232 bool High = Name[20] == 'h' || Name[21] == 'h'; 3233 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3234 Intrinsic::ID IID; 3235 if (VecWidth == 128 && !High) 3236 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128; 3237 else if (VecWidth == 256 && !High) 3238 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256; 3239 else if (VecWidth == 512 && !High) 3240 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512; 3241 else if (VecWidth == 128 && High) 3242 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128; 3243 else if (VecWidth == 256 && High) 3244 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256; 3245 else if (VecWidth == 512 && High) 3246 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512; 3247 else 3248 llvm_unreachable("Unexpected intrinsic"); 3249 3250 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1), 3251 CI->getArgOperand(2) }; 3252 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3253 Args); 3254 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3255 : CI->getArgOperand(0); 3256 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3257 } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") || 3258 Name.startswith("avx512.mask.vpermt2var.") || 3259 Name.startswith("avx512.maskz.vpermt2var."))) { 3260 bool ZeroMask = Name[11] == 'z'; 3261 bool IndexForm = Name[17] == 'i'; 3262 Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm); 3263 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") || 3264 Name.startswith("avx512.maskz.vpdpbusd.") || 3265 Name.startswith("avx512.mask.vpdpbusds.") || 3266 Name.startswith("avx512.maskz.vpdpbusds."))) { 3267 bool ZeroMask = Name[11] == 'z'; 3268 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's'; 3269 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3270 Intrinsic::ID IID; 3271 if (VecWidth == 128 && !IsSaturating) 3272 IID = Intrinsic::x86_avx512_vpdpbusd_128; 3273 else if (VecWidth == 256 && !IsSaturating) 3274 IID = Intrinsic::x86_avx512_vpdpbusd_256; 3275 else if (VecWidth == 512 && !IsSaturating) 3276 IID = Intrinsic::x86_avx512_vpdpbusd_512; 3277 else if (VecWidth == 128 && IsSaturating) 3278 IID = Intrinsic::x86_avx512_vpdpbusds_128; 3279 else if (VecWidth == 256 && IsSaturating) 3280 IID = Intrinsic::x86_avx512_vpdpbusds_256; 3281 else if (VecWidth == 512 && IsSaturating) 3282 IID = Intrinsic::x86_avx512_vpdpbusds_512; 3283 else 3284 llvm_unreachable("Unexpected intrinsic"); 3285 3286 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3287 CI->getArgOperand(2) }; 3288 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3289 Args); 3290 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3291 : CI->getArgOperand(0); 3292 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3293 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") || 3294 Name.startswith("avx512.maskz.vpdpwssd.") || 3295 Name.startswith("avx512.mask.vpdpwssds.") || 3296 Name.startswith("avx512.maskz.vpdpwssds."))) { 3297 bool ZeroMask = Name[11] == 'z'; 3298 bool IsSaturating = Name[ZeroMask ? 
21 : 20] == 's'; 3299 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3300 Intrinsic::ID IID; 3301 if (VecWidth == 128 && !IsSaturating) 3302 IID = Intrinsic::x86_avx512_vpdpwssd_128; 3303 else if (VecWidth == 256 && !IsSaturating) 3304 IID = Intrinsic::x86_avx512_vpdpwssd_256; 3305 else if (VecWidth == 512 && !IsSaturating) 3306 IID = Intrinsic::x86_avx512_vpdpwssd_512; 3307 else if (VecWidth == 128 && IsSaturating) 3308 IID = Intrinsic::x86_avx512_vpdpwssds_128; 3309 else if (VecWidth == 256 && IsSaturating) 3310 IID = Intrinsic::x86_avx512_vpdpwssds_256; 3311 else if (VecWidth == 512 && IsSaturating) 3312 IID = Intrinsic::x86_avx512_vpdpwssds_512; 3313 else 3314 llvm_unreachable("Unexpected intrinsic"); 3315 3316 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3317 CI->getArgOperand(2) }; 3318 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3319 Args); 3320 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3321 : CI->getArgOperand(0); 3322 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3323 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" || 3324 Name == "addcarry.u32" || Name == "addcarry.u64" || 3325 Name == "subborrow.u32" || Name == "subborrow.u64")) { 3326 Intrinsic::ID IID; 3327 if (Name[0] == 'a' && Name.back() == '2') 3328 IID = Intrinsic::x86_addcarry_32; 3329 else if (Name[0] == 'a' && Name.back() == '4') 3330 IID = Intrinsic::x86_addcarry_64; 3331 else if (Name[0] == 's' && Name.back() == '2') 3332 IID = Intrinsic::x86_subborrow_32; 3333 else if (Name[0] == 's' && Name.back() == '4') 3334 IID = Intrinsic::x86_subborrow_64; 3335 else 3336 llvm_unreachable("Unexpected intrinsic"); 3337 3338 // Make a call with 3 operands. 3339 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3340 CI->getArgOperand(2)}; 3341 Value *NewCall = Builder.CreateCall( 3342 Intrinsic::getDeclaration(CI->getModule(), IID), 3343 Args); 3344 3345 // Extract the second result and store it. 3346 Value *Data = Builder.CreateExtractValue(NewCall, 1); 3347 // Cast the pointer to the right type. 3348 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3), 3349 llvm::PointerType::getUnqual(Data->getType())); 3350 Builder.CreateAlignedStore(Data, Ptr, 1); 3351 // Replace the original call result with the first result of the new call. 3352 Value *CF = Builder.CreateExtractValue(NewCall, 0); 3353 3354 CI->replaceAllUsesWith(CF); 3355 Rep = nullptr; 3356 } else if (IsX86 && Name.startswith("avx512.mask.") && 3357 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) { 3358 // Rep will be updated by the call in the condition. 3359 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) { 3360 Value *Arg = CI->getArgOperand(0); 3361 Value *Neg = Builder.CreateNeg(Arg, "neg"); 3362 Value *Cmp = Builder.CreateICmpSGE( 3363 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond"); 3364 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs"); 3365 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" || 3366 Name == "max.ui" || Name == "max.ull")) { 3367 Value *Arg0 = CI->getArgOperand(0); 3368 Value *Arg1 = CI->getArgOperand(1); 3369 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 3370 ? 
Builder.CreateICmpUGE(Arg0, Arg1, "max.cond") 3371 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond"); 3372 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max"); 3373 } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" || 3374 Name == "min.ui" || Name == "min.ull")) { 3375 Value *Arg0 = CI->getArgOperand(0); 3376 Value *Arg1 = CI->getArgOperand(1); 3377 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 3378 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond") 3379 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond"); 3380 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min"); 3381 } else if (IsNVVM && Name == "clz.ll") { 3382 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 and returns an i64. 3383 Value *Arg = CI->getArgOperand(0); 3384 Value *Ctlz = Builder.CreateCall( 3385 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, 3386 {Arg->getType()}), 3387 {Arg, Builder.getFalse()}, "ctlz"); 3388 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc"); 3389 } else if (IsNVVM && Name == "popc.ll") { 3390 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 and returns an 3391 // i64. 3392 Value *Arg = CI->getArgOperand(0); 3393 Value *Popc = Builder.CreateCall( 3394 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop, 3395 {Arg->getType()}), 3396 Arg, "ctpop"); 3397 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc"); 3398 } else if (IsNVVM && Name == "h2f") { 3399 Rep = Builder.CreateCall(Intrinsic::getDeclaration( 3400 F->getParent(), Intrinsic::convert_from_fp16, 3401 {Builder.getFloatTy()}), 3402 CI->getArgOperand(0), "h2f"); 3403 } else { 3404 llvm_unreachable("Unknown function for CallInst upgrade."); 3405 } 3406 3407 if (Rep) 3408 CI->replaceAllUsesWith(Rep); 3409 CI->eraseFromParent(); 3410 return; 3411 } 3412 3413 const auto &DefaultCase = [&NewFn, &CI]() -> void { 3414 // Handle generic mangling change, but nothing else 3415 assert( 3416 (CI->getCalledFunction()->getName() != NewFn->getName()) && 3417 "Unknown function for CallInst upgrade and isn't just a name change"); 3418 CI->setCalledFunction(NewFn); 3419 }; 3420 CallInst *NewCall = nullptr; 3421 switch (NewFn->getIntrinsicID()) { 3422 default: { 3423 DefaultCase(); 3424 return; 3425 } 3426 3427 case Intrinsic::arm_neon_vld1: 3428 case Intrinsic::arm_neon_vld2: 3429 case Intrinsic::arm_neon_vld3: 3430 case Intrinsic::arm_neon_vld4: 3431 case Intrinsic::arm_neon_vld2lane: 3432 case Intrinsic::arm_neon_vld3lane: 3433 case Intrinsic::arm_neon_vld4lane: 3434 case Intrinsic::arm_neon_vst1: 3435 case Intrinsic::arm_neon_vst2: 3436 case Intrinsic::arm_neon_vst3: 3437 case Intrinsic::arm_neon_vst4: 3438 case Intrinsic::arm_neon_vst2lane: 3439 case Intrinsic::arm_neon_vst3lane: 3440 case Intrinsic::arm_neon_vst4lane: { 3441 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3442 CI->arg_operands().end()); 3443 NewCall = Builder.CreateCall(NewFn, Args); 3444 break; 3445 } 3446 3447 case Intrinsic::bitreverse: 3448 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3449 break; 3450 3451 case Intrinsic::ctlz: 3452 case Intrinsic::cttz: 3453 assert(CI->getNumArgOperands() == 1 && 3454 "Mismatch between function args and call args"); 3455 NewCall = 3456 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()}); 3457 break; 3458 3459 case Intrinsic::objectsize: { 3460 Value *NullIsUnknownSize = CI->getNumArgOperands() == 2 3461 ? Builder.getFalse() 3462 : CI->getArgOperand(2); 3463 Value *Dynamic = 3464 CI->getNumArgOperands() < 4 ? 
Builder.getFalse() : CI->getArgOperand(3); 3465 NewCall = Builder.CreateCall( 3466 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic}); 3467 break; 3468 } 3469 3470 case Intrinsic::ctpop: 3471 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3472 break; 3473 3474 case Intrinsic::convert_from_fp16: 3475 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3476 break; 3477 3478 case Intrinsic::dbg_value: 3479 // Upgrade from the old version that had an extra offset argument. 3480 assert(CI->getNumArgOperands() == 4); 3481 // Drop nonzero offsets instead of attempting to upgrade them. 3482 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1))) 3483 if (Offset->isZeroValue()) { 3484 NewCall = Builder.CreateCall( 3485 NewFn, 3486 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)}); 3487 break; 3488 } 3489 CI->eraseFromParent(); 3490 return; 3491 3492 case Intrinsic::x86_xop_vfrcz_ss: 3493 case Intrinsic::x86_xop_vfrcz_sd: 3494 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)}); 3495 break; 3496 3497 case Intrinsic::x86_xop_vpermil2pd: 3498 case Intrinsic::x86_xop_vpermil2ps: 3499 case Intrinsic::x86_xop_vpermil2pd_256: 3500 case Intrinsic::x86_xop_vpermil2ps_256: { 3501 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3502 CI->arg_operands().end()); 3503 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType()); 3504 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy); 3505 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy); 3506 NewCall = Builder.CreateCall(NewFn, Args); 3507 break; 3508 } 3509 3510 case Intrinsic::x86_sse41_ptestc: 3511 case Intrinsic::x86_sse41_ptestz: 3512 case Intrinsic::x86_sse41_ptestnzc: { 3513 // The arguments for these intrinsics used to be v4f32, and changed 3514 // to v2i64. This is purely a nop, since those are bitwise intrinsics. 3515 // So, the only thing required is a bitcast for both arguments. 3516 // First, check the arguments have the old type. 3517 Value *Arg0 = CI->getArgOperand(0); 3518 if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4)) 3519 return; 3520 3521 // Old intrinsic, add bitcasts 3522 Value *Arg1 = CI->getArgOperand(1); 3523 3524 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 3525 3526 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast"); 3527 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 3528 3529 NewCall = Builder.CreateCall(NewFn, {BC0, BC1}); 3530 break; 3531 } 3532 3533 case Intrinsic::x86_rdtscp: { 3534 // This used to take 1 arguments. If we have no arguments, it is already 3535 // upgraded. 3536 if (CI->getNumOperands() == 0) 3537 return; 3538 3539 NewCall = Builder.CreateCall(NewFn); 3540 // Extract the second result and store it. 3541 Value *Data = Builder.CreateExtractValue(NewCall, 1); 3542 // Cast the pointer to the right type. 3543 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0), 3544 llvm::PointerType::getUnqual(Data->getType())); 3545 Builder.CreateAlignedStore(Data, Ptr, 1); 3546 // Replace the original call result with the first result of the new call. 
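    // (The old llvm.x86.rdtscp form returned the TSC directly and wrote the
    // 32-bit IA32_TSC_AUX value through its pointer operand; the new form
    // returns both values in a struct, so the aux value was stored above and
    // the TSC is extracted here.)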
3547 Value *TSC = Builder.CreateExtractValue(NewCall, 0); 3548 3549 std::string Name = CI->getName(); 3550 if (!Name.empty()) { 3551 CI->setName(Name + ".old"); 3552 NewCall->setName(Name); 3553 } 3554 CI->replaceAllUsesWith(TSC); 3555 CI->eraseFromParent(); 3556 return; 3557 } 3558 3559 case Intrinsic::x86_sse41_insertps: 3560 case Intrinsic::x86_sse41_dppd: 3561 case Intrinsic::x86_sse41_dpps: 3562 case Intrinsic::x86_sse41_mpsadbw: 3563 case Intrinsic::x86_avx_dp_ps_256: 3564 case Intrinsic::x86_avx2_mpsadbw: { 3565 // Need to truncate the last argument from i32 to i8 -- this argument models 3566 // an inherently 8-bit immediate operand to these x86 instructions. 3567 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3568 CI->arg_operands().end()); 3569 3570 // Replace the last argument with a trunc. 3571 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc"); 3572 NewCall = Builder.CreateCall(NewFn, Args); 3573 break; 3574 } 3575 3576 case Intrinsic::thread_pointer: { 3577 NewCall = Builder.CreateCall(NewFn, {}); 3578 break; 3579 } 3580 3581 case Intrinsic::invariant_start: 3582 case Intrinsic::invariant_end: 3583 case Intrinsic::masked_load: 3584 case Intrinsic::masked_store: 3585 case Intrinsic::masked_gather: 3586 case Intrinsic::masked_scatter: { 3587 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3588 CI->arg_operands().end()); 3589 NewCall = Builder.CreateCall(NewFn, Args); 3590 break; 3591 } 3592 3593 case Intrinsic::memcpy: 3594 case Intrinsic::memmove: 3595 case Intrinsic::memset: { 3596 // We have to make sure that the call signature is what we're expecting. 3597 // We only want to change the old signatures by removing the alignment arg: 3598 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i32, i1) 3599 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i1) 3600 // @llvm.memset...(i8*, i8, i[32|64], i32, i1) 3601 // -> @llvm.memset...(i8*, i8, i[32|64], i1) 3602 // Note: i8*'s in the above can be any pointer type 3603 if (CI->getNumArgOperands() != 5) { 3604 DefaultCase(); 3605 return; 3606 } 3607 // Remove alignment argument (3), and add alignment attributes to the 3608 // dest/src pointers. 3609 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1), 3610 CI->getArgOperand(2), CI->getArgOperand(4)}; 3611 NewCall = Builder.CreateCall(NewFn, Args); 3612 auto *MemCI = cast<MemIntrinsic>(NewCall); 3613 // All mem intrinsics support dest alignment. 3614 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3)); 3615 MemCI->setDestAlignment(Align->getZExtValue()); 3616 // Memcpy/Memmove also support source alignment. 3617 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI)) 3618 MTI->setSourceAlignment(Align->getZExtValue()); 3619 break; 3620 } 3621 } 3622 assert(NewCall && "Should have either set this variable or returned through " 3623 "the default case"); 3624 std::string Name = CI->getName(); 3625 if (!Name.empty()) { 3626 CI->setName(Name + ".old"); 3627 NewCall->setName(Name); 3628 } 3629 CI->replaceAllUsesWith(NewCall); 3630 CI->eraseFromParent(); 3631 } 3632 3633 void llvm::UpgradeCallsToIntrinsic(Function *F) { 3634 assert(F && "Illegal attempt to upgrade a non-existent intrinsic."); 3635 3636 // Check if this function should be upgraded and get the replacement function 3637 // if there is one. 3638 Function *NewFn; 3639 if (UpgradeIntrinsicFunction(F, NewFn)) { 3640 // Replace all users of the old function with the new function or new 3641 // instructions. This is not a range loop because the call is deleted. 
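    // UpgradeIntrinsicCall may erase the visited CallInst, so the iterator is
    // advanced before the current use can be invalidated.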
3642 for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; ) 3643 if (CallInst *CI = dyn_cast<CallInst>(*UI++)) 3644 UpgradeIntrinsicCall(CI, NewFn); 3645 3646 // Remove old function, no longer used, from the module. 3647 F->eraseFromParent(); 3648 } 3649 } 3650 3651 MDNode *llvm::UpgradeTBAANode(MDNode &MD) { 3652 // Check if the tag uses struct-path aware TBAA format. 3653 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3) 3654 return &MD; 3655 3656 auto &Context = MD.getContext(); 3657 if (MD.getNumOperands() == 3) { 3658 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)}; 3659 MDNode *ScalarType = MDNode::get(Context, Elts); 3660 // Create a MDNode <ScalarType, ScalarType, offset 0, const> 3661 Metadata *Elts2[] = {ScalarType, ScalarType, 3662 ConstantAsMetadata::get( 3663 Constant::getNullValue(Type::getInt64Ty(Context))), 3664 MD.getOperand(2)}; 3665 return MDNode::get(Context, Elts2); 3666 } 3667 // Create a MDNode <MD, MD, offset 0> 3668 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue( 3669 Type::getInt64Ty(Context)))}; 3670 return MDNode::get(Context, Elts); 3671 } 3672 3673 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy, 3674 Instruction *&Temp) { 3675 if (Opc != Instruction::BitCast) 3676 return nullptr; 3677 3678 Temp = nullptr; 3679 Type *SrcTy = V->getType(); 3680 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3681 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3682 LLVMContext &Context = V->getContext(); 3683 3684 // We have no information about target data layout, so we assume that 3685 // the maximum pointer size is 64bit. 3686 Type *MidTy = Type::getInt64Ty(Context); 3687 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy); 3688 3689 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy); 3690 } 3691 3692 return nullptr; 3693 } 3694 3695 Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) { 3696 if (Opc != Instruction::BitCast) 3697 return nullptr; 3698 3699 Type *SrcTy = C->getType(); 3700 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3701 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3702 LLVMContext &Context = C->getContext(); 3703 3704 // We have no information about target data layout, so we assume that 3705 // the maximum pointer size is 64bit. 3706 Type *MidTy = Type::getInt64Ty(Context); 3707 3708 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy), 3709 DestTy); 3710 } 3711 3712 return nullptr; 3713 } 3714 3715 /// Check the debug info version number, if it is out-dated, drop the debug 3716 /// info. Return true if module is modified. 3717 bool llvm::UpgradeDebugInfo(Module &M) { 3718 unsigned Version = getDebugMetadataVersionFromModule(M); 3719 if (Version == DEBUG_METADATA_VERSION) { 3720 bool BrokenDebugInfo = false; 3721 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo)) 3722 report_fatal_error("Broken module found, compilation aborted!"); 3723 if (!BrokenDebugInfo) 3724 // Everything is ok. 3725 return false; 3726 else { 3727 // Diagnose malformed debug info. 3728 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M); 3729 M.getContext().diagnose(Diag); 3730 } 3731 } 3732 bool Modified = StripDebugInfo(M); 3733 if (Modified && Version != DEBUG_METADATA_VERSION) { 3734 // Diagnose a version mismatch. 
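    // The out-of-date debug info has already been removed by StripDebugInfo
    // above; this only reports the stale version number that was found.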
    DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
    M.getContext().diagnose(DiagVersion);
  }
  return Modified;
}

bool llvm::UpgradeRetainReleaseMarker(Module &M) {
  bool Changed = false;
  NamedMDNode *ModRetainReleaseMarker =
      M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker");
  if (ModRetainReleaseMarker) {
    MDNode *Op = ModRetainReleaseMarker->getOperand(0);
    if (Op) {
      MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
      if (ID) {
        SmallVector<StringRef, 4> ValueComp;
        ID->getString().split(ValueComp, "#");
        if (ValueComp.size() == 2) {
          std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
          Metadata *Ops[1] = {MDString::get(M.getContext(), NewValue)};
          ModRetainReleaseMarker->setOperand(0,
                                             MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }
  return Changed;
}

bool llvm::UpgradeModuleFlags(Module &M) {
  NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
  if (!ModFlags)
    return false;

  bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
  for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
    MDNode *Op = ModFlags->getOperand(I);
    if (Op->getNumOperands() != 3)
      continue;
    MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
    if (!ID)
      continue;
    if (ID->getString() == "Objective-C Image Info Version")
      HasObjCFlag = true;
    if (ID->getString() == "Objective-C Class Properties")
      HasClassProperties = true;
    // Upgrade PIC/PIE Module Flags. The module flag behavior for these two
    // flags was Error and is now Max.
    if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
      if (auto *Behavior =
              mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
        if (Behavior->getLimitedValue() == Module::Error) {
          Type *Int32Ty = Type::getInt32Ty(M.getContext());
          Metadata *Ops[3] = {
              ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
              MDString::get(M.getContext(), ID->getString()),
              Op->getOperand(2)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
    // Upgrade Objective-C Image Info Section. Remove the whitespace in the
    // section name so that llvm-lto will not complain about mismatching
    // module flags that are functionally the same.
    if (ID->getString() == "Objective-C Image Info Section") {
      if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
        SmallVector<StringRef, 4> ValueComp;
        Value->getString().split(ValueComp, " ");
        if (ValueComp.size() != 1) {
          std::string NewValue;
          for (auto &S : ValueComp)
            NewValue += S.str();
          Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
                              MDString::get(M.getContext(), NewValue)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }

  // "Objective-C Class Properties" is a recently added module flag for
  // Objective-C. We upgrade ObjC bitcode to contain an "Objective-C Class
  // Properties" module flag of value 0, so we can correctly downgrade this
  // flag when linking ObjC bitcode that lacks the flag against ObjC bitcode
  // that has it.
3823 if (HasObjCFlag && !HasClassProperties) { 3824 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties", 3825 (uint32_t)0); 3826 Changed = true; 3827 } 3828 3829 return Changed; 3830 } 3831 3832 void llvm::UpgradeSectionAttributes(Module &M) { 3833 auto TrimSpaces = [](StringRef Section) -> std::string { 3834 SmallVector<StringRef, 5> Components; 3835 Section.split(Components, ','); 3836 3837 SmallString<32> Buffer; 3838 raw_svector_ostream OS(Buffer); 3839 3840 for (auto Component : Components) 3841 OS << ',' << Component.trim(); 3842 3843 return OS.str().substr(1); 3844 }; 3845 3846 for (auto &GV : M.globals()) { 3847 if (!GV.hasSection()) 3848 continue; 3849 3850 StringRef Section = GV.getSection(); 3851 3852 if (!Section.startswith("__DATA, __objc_catlist")) 3853 continue; 3854 3855 // __DATA, __objc_catlist, regular, no_dead_strip 3856 // __DATA,__objc_catlist,regular,no_dead_strip 3857 GV.setSection(TrimSpaces(Section)); 3858 } 3859 } 3860 3861 static bool isOldLoopArgument(Metadata *MD) { 3862 auto *T = dyn_cast_or_null<MDTuple>(MD); 3863 if (!T) 3864 return false; 3865 if (T->getNumOperands() < 1) 3866 return false; 3867 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0)); 3868 if (!S) 3869 return false; 3870 return S->getString().startswith("llvm.vectorizer."); 3871 } 3872 3873 static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) { 3874 StringRef OldPrefix = "llvm.vectorizer."; 3875 assert(OldTag.startswith(OldPrefix) && "Expected old prefix"); 3876 3877 if (OldTag == "llvm.vectorizer.unroll") 3878 return MDString::get(C, "llvm.loop.interleave.count"); 3879 3880 return MDString::get( 3881 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size())) 3882 .str()); 3883 } 3884 3885 static Metadata *upgradeLoopArgument(Metadata *MD) { 3886 auto *T = dyn_cast_or_null<MDTuple>(MD); 3887 if (!T) 3888 return MD; 3889 if (T->getNumOperands() < 1) 3890 return MD; 3891 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0)); 3892 if (!OldTag) 3893 return MD; 3894 if (!OldTag->getString().startswith("llvm.vectorizer.")) 3895 return MD; 3896 3897 // This has an old tag. Upgrade it. 3898 SmallVector<Metadata *, 8> Ops; 3899 Ops.reserve(T->getNumOperands()); 3900 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString())); 3901 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I) 3902 Ops.push_back(T->getOperand(I)); 3903 3904 return MDTuple::get(T->getContext(), Ops); 3905 } 3906 3907 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) { 3908 auto *T = dyn_cast<MDTuple>(&N); 3909 if (!T) 3910 return &N; 3911 3912 if (none_of(T->operands(), isOldLoopArgument)) 3913 return &N; 3914 3915 SmallVector<Metadata *, 8> Ops; 3916 Ops.reserve(T->getNumOperands()); 3917 for (Metadata *MD : T->operands()) 3918 Ops.push_back(upgradeLoopArgument(MD)); 3919 3920 return MDTuple::get(T->getContext(), Ops); 3921 } 3922
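
// Illustrative example (not part of the upgrade logic itself): legacy loop
// hints of the form
//   !{!"llvm.vectorizer.width", i32 4}
//   !{!"llvm.vectorizer.unroll", i32 2}
// are renamed by upgradeLoopTag/upgradeLoopArgument to
//   !{!"llvm.loop.vectorize.width", i32 4}
//   !{!"llvm.loop.interleave.count", i32 2}
// while loop-attachment operands that do not begin with "llvm.vectorizer."
// pass through upgradeInstructionLoopAttachment unchanged.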