//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>
using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old; replace it with the new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsics matched below should be marked with the LLVM
  // version that started autoupgrading them. At some point in the future we
  // would like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
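  // Illustrative example (hypothetical module contents): a declaration of
  // llvm.x86.sse2.pcmpeq.b matches "sse2.pcmpeq." below, so this function
  // returns true; the caller then leaves NewFn as nullptr, signalling that
  // each call site must be rewritten inline by UpgradeIntrinsicCall rather
  // than remapped to a new declaration.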
73 if (Name == "addcarryx.u32" || // Added in 8.0 74 Name == "addcarryx.u64" || // Added in 8.0 75 Name == "addcarry.u32" || // Added in 8.0 76 Name == "addcarry.u64" || // Added in 8.0 77 Name == "subborrow.u32" || // Added in 8.0 78 Name == "subborrow.u64" || // Added in 8.0 79 Name.startswith("sse2.padds.") || // Added in 8.0 80 Name.startswith("sse2.psubs.") || // Added in 8.0 81 Name.startswith("sse2.paddus.") || // Added in 8.0 82 Name.startswith("sse2.psubus.") || // Added in 8.0 83 Name.startswith("avx2.padds.") || // Added in 8.0 84 Name.startswith("avx2.psubs.") || // Added in 8.0 85 Name.startswith("avx2.paddus.") || // Added in 8.0 86 Name.startswith("avx2.psubus.") || // Added in 8.0 87 Name.startswith("avx512.padds.") || // Added in 8.0 88 Name.startswith("avx512.psubs.") || // Added in 8.0 89 Name.startswith("avx512.mask.padds.") || // Added in 8.0 90 Name.startswith("avx512.mask.psubs.") || // Added in 8.0 91 Name.startswith("avx512.mask.paddus.") || // Added in 8.0 92 Name.startswith("avx512.mask.psubus.") || // Added in 8.0 93 Name=="ssse3.pabs.b.128" || // Added in 6.0 94 Name=="ssse3.pabs.w.128" || // Added in 6.0 95 Name=="ssse3.pabs.d.128" || // Added in 6.0 96 Name.startswith("fma4.vfmadd.s") || // Added in 7.0 97 Name.startswith("fma.vfmadd.") || // Added in 7.0 98 Name.startswith("fma.vfmsub.") || // Added in 7.0 99 Name.startswith("fma.vfmaddsub.") || // Added in 7.0 100 Name.startswith("fma.vfmsubadd.") || // Added in 7.0 101 Name.startswith("fma.vfnmadd.") || // Added in 7.0 102 Name.startswith("fma.vfnmsub.") || // Added in 7.0 103 Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0 104 Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0 105 Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0 106 Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0 107 Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0 108 Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0 109 Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0 110 Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0 111 Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0 112 Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0 113 Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0 114 Name.startswith("avx512.mask.shuf.i") || // Added in 6.0 115 Name.startswith("avx512.mask.shuf.f") || // Added in 6.0 116 Name.startswith("avx512.kunpck") || //added in 6.0 117 Name.startswith("avx2.pabs.") || // Added in 6.0 118 Name.startswith("avx512.mask.pabs.") || // Added in 6.0 119 Name.startswith("avx512.broadcastm") || // Added in 6.0 120 Name == "sse.sqrt.ss" || // Added in 7.0 121 Name == "sse2.sqrt.sd" || // Added in 7.0 122 Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0 123 Name.startswith("avx.sqrt.p") || // Added in 7.0 124 Name.startswith("sse2.sqrt.p") || // Added in 7.0 125 Name.startswith("sse.sqrt.p") || // Added in 7.0 126 Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0 127 Name.startswith("sse2.pcmpeq.") || // Added in 3.1 128 Name.startswith("sse2.pcmpgt.") || // Added in 3.1 129 Name.startswith("avx2.pcmpeq.") || // Added in 3.1 130 Name.startswith("avx2.pcmpgt.") || // Added in 3.1 131 Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9 132 Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9 133 Name.startswith("avx.vperm2f128.") || // Added in 6.0 134 Name == "avx2.vperm2i128" || // Added in 6.0 135 Name == "sse.add.ss" || // Added in 4.0 136 Name == "sse2.add.sd" || // Added in 4.0 
      Name == "sse.sub.ss" || // Added in 4.0
      Name == "sse2.sub.sd" || // Added in 4.0
      Name == "sse.mul.ss" || // Added in 4.0
      Name == "sse2.mul.sd" || // Added in 4.0
      Name == "sse.div.ss" || // Added in 4.0
      Name == "sse2.div.sd" || // Added in 4.0
      Name == "sse41.pmaxsb" || // Added in 3.9
      Name == "sse2.pmaxs.w" || // Added in 3.9
      Name == "sse41.pmaxsd" || // Added in 3.9
      Name == "sse2.pmaxu.b" || // Added in 3.9
      Name == "sse41.pmaxuw" || // Added in 3.9
      Name == "sse41.pmaxud" || // Added in 3.9
      Name == "sse41.pminsb" || // Added in 3.9
      Name == "sse2.pmins.w" || // Added in 3.9
      Name == "sse41.pminsd" || // Added in 3.9
      Name == "sse2.pminu.b" || // Added in 3.9
      Name == "sse41.pminuw" || // Added in 3.9
      Name == "sse41.pminud" || // Added in 3.9
      Name == "avx512.kand.w" || // Added in 7.0
      Name == "avx512.kandn.w" || // Added in 7.0
      Name == "avx512.knot.w" || // Added in 7.0
      Name == "avx512.kor.w" || // Added in 7.0
      Name == "avx512.kxor.w" || // Added in 7.0
      Name == "avx512.kxnor.w" || // Added in 7.0
      Name == "avx512.kortestc.w" || // Added in 7.0
      Name == "avx512.kortestz.w" || // Added in 7.0
      Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
      Name.startswith("avx2.pmax") || // Added in 3.9
      Name.startswith("avx2.pmin") || // Added in 3.9
      Name.startswith("avx512.mask.pmax") || // Added in 4.0
      Name.startswith("avx512.mask.pmin") || // Added in 4.0
      Name.startswith("avx2.vbroadcast") || // Added in 3.8
      Name.startswith("avx2.pbroadcast") || // Added in 3.8
      Name.startswith("avx.vpermil.") || // Added in 3.1
      Name.startswith("sse2.pshuf") || // Added in 3.9
      Name.startswith("avx512.pbroadcast") || // Added in 3.9
      Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
      Name.startswith("avx512.mask.movddup") || // Added in 3.9
      Name.startswith("avx512.mask.movshdup") || // Added in 3.9
      Name.startswith("avx512.mask.movsldup") || // Added in 3.9
      Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
      Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
      Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
      Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
      Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
      Name.startswith("avx512.mask.punpckl") || // Added in 3.9
      Name.startswith("avx512.mask.punpckh") || // Added in 3.9
      Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
      Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
      Name.startswith("avx512.mask.pand.") || // Added in 3.9
      Name.startswith("avx512.mask.pandn.") || // Added in 3.9
      Name.startswith("avx512.mask.por.") || // Added in 3.9
      Name.startswith("avx512.mask.pxor.") || // Added in 3.9
      Name.startswith("avx512.mask.and.") || // Added in 3.9
      Name.startswith("avx512.mask.andn.") || // Added in 3.9
      Name.startswith("avx512.mask.or.") || // Added in 3.9
      Name.startswith("avx512.mask.xor.") || // Added in 3.9
      Name.startswith("avx512.mask.padd.") || // Added in 4.0
      Name.startswith("avx512.mask.psub.") || // Added in 4.0
      Name.startswith("avx512.mask.pmull.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtudq2ps.") || // Added in 7.0, updated in 9.0
      Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0, updated in 9.0
      Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0, updated in 9.0
      Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0, updated in 9.0
      Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
      Name == "avx512.cvtusi2sd" || // Added in 7.0
      Name.startswith("avx512.mask.permvar.") || // Added in 7.0
      Name == "sse2.pmulu.dq" || // Added in 7.0
      Name == "sse41.pmuldq" || // Added in 7.0
      Name == "avx2.pmulu.dq" || // Added in 7.0
      Name == "avx2.pmul.dq" || // Added in 7.0
      Name == "avx512.pmulu.dq.512" || // Added in 7.0
      Name == "avx512.pmul.dq.512" || // Added in 7.0
      Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
      Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
      Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.p") || // Added in 7.0
      Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
      Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
      Name.startswith("avx512.mask.psll.d") || // Added in 4.0
      Name.startswith("avx512.mask.psll.q") || // Added in 4.0
      Name.startswith("avx512.mask.psll.w") || // Added in 4.0
      Name.startswith("avx512.mask.psra.d") || // Added in 4.0
      Name.startswith("avx512.mask.psra.q") || // Added in 4.0
      Name.startswith("avx512.mask.psra.w") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
      Name.startswith("avx512.mask.pslli") || // Added in 4.0
      Name.startswith("avx512.mask.psrai") || // Added in 4.0
      Name.startswith("avx512.mask.psrli") || // Added in 4.0
      Name.startswith("avx512.mask.psllv") || // Added in 4.0
Name.startswith("avx512.mask.psrav") || // Added in 4.0 261 Name.startswith("avx512.mask.psrlv") || // Added in 4.0 262 Name.startswith("sse41.pmovsx") || // Added in 3.8 263 Name.startswith("sse41.pmovzx") || // Added in 3.9 264 Name.startswith("avx2.pmovsx") || // Added in 3.9 265 Name.startswith("avx2.pmovzx") || // Added in 3.9 266 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0 267 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0 268 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0 269 Name.startswith("avx512.mask.pternlog.") || // Added in 7.0 270 Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0 271 Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0 272 Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0 273 Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0 274 Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0 275 Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0 276 Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0 277 Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0 278 Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0 279 Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0 280 Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0 281 Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0 282 Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0 283 Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0 284 Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0 285 Name.startswith("avx512.mask.vpshld.") || // Added in 7.0 286 Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0 287 Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0 288 Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0 289 Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0 290 Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0 291 Name.startswith("avx512.vpshld.") || // Added in 8.0 292 Name.startswith("avx512.vpshrd.") || // Added in 8.0 293 Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0 294 Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0 295 Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0 296 Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0 297 Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0 298 Name.startswith("avx512.mask.min.p") || // Added in 7.0. 
      Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
      Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
      Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
      Name.startswith("avx512.mask.conflict.") || // Added in 9.0
      Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
      Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
      Name == "sse.cvtsi2ss" || // Added in 7.0
      Name == "sse.cvtsi642ss" || // Added in 7.0
      Name == "sse2.cvtsi2sd" || // Added in 7.0
      Name == "sse2.cvtsi642sd" || // Added in 7.0
      Name == "sse2.cvtss2sd" || // Added in 7.0
      Name == "sse2.cvtdq2pd" || // Added in 3.9
      Name == "sse2.cvtdq2ps" || // Added in 7.0
      Name == "sse2.cvtps2pd" || // Added in 3.9
      Name == "avx.cvtdq2.pd.256" || // Added in 3.9
      Name == "avx.cvtdq2.ps.256" || // Added in 7.0
      Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
      Name.startswith("avx.vinsertf128.") || // Added in 3.7
      Name == "avx2.vinserti128" || // Added in 3.7
      Name.startswith("avx512.mask.insert") || // Added in 4.0
      Name.startswith("avx.vextractf128.") || // Added in 3.7
      Name == "avx2.vextracti128" || // Added in 3.7
      Name.startswith("avx512.mask.vextract") || // Added in 4.0
      Name.startswith("sse4a.movnt.") || // Added in 3.9
      Name.startswith("avx.movnt.") || // Added in 3.2
      Name.startswith("avx512.storent.") || // Added in 3.9
      Name == "sse41.movntdqa" || // Added in 5.0
      Name == "avx2.movntdqa" || // Added in 5.0
      Name == "avx512.movntdqa" || // Added in 5.0
      Name == "sse2.storel.dq" || // Added in 3.9
      Name.startswith("sse.storeu.") || // Added in 3.9
      Name.startswith("sse2.storeu.") || // Added in 3.9
      Name.startswith("avx.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.store.p") || // Added in 3.9
      Name.startswith("avx512.mask.store.b.") || // Added in 3.9
      Name.startswith("avx512.mask.store.w.") || // Added in 3.9
      Name.startswith("avx512.mask.store.d.") || // Added in 3.9
      Name.startswith("avx512.mask.store.q.") || // Added in 3.9
      Name == "avx512.mask.store.ss" || // Added in 7.0
      Name.startswith("avx512.mask.loadu.") || // Added in 3.9
      Name.startswith("avx512.mask.load.") || // Added in 3.9
      Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
      Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
      Name.startswith("avx512.mask.expand.b") || // Added in 9.0
      Name.startswith("avx512.mask.expand.w") || // Added in 9.0
      Name.startswith("avx512.mask.expand.d") || // Added in 9.0
      Name.startswith("avx512.mask.expand.q") || // Added in 9.0
      Name.startswith("avx512.mask.expand.p") || // Added in 9.0
      Name.startswith("avx512.mask.compress.b") || // Added in 9.0
      Name.startswith("avx512.mask.compress.w") || // Added in 9.0
      Name.startswith("avx512.mask.compress.d") || // Added in 9.0
      Name.startswith("avx512.mask.compress.q") || // Added in 9.0
      Name.startswith("avx512.mask.compress.p") || // Added in 9.0
      Name == "sse42.crc32.64.8" || // Added in 3.4
      Name.startswith("avx.vbroadcast.s") || // Added in 3.5
      Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
      Name.startswith("avx512.mask.palignr.") || // Added in 3.9
      Name.startswith("avx512.mask.valign.") || // Added in 4.0
Name.startswith("sse2.psll.dq") || // Added in 3.7 361 Name.startswith("sse2.psrl.dq") || // Added in 3.7 362 Name.startswith("avx2.psll.dq") || // Added in 3.7 363 Name.startswith("avx2.psrl.dq") || // Added in 3.7 364 Name.startswith("avx512.psll.dq") || // Added in 3.9 365 Name.startswith("avx512.psrl.dq") || // Added in 3.9 366 Name == "sse41.pblendw" || // Added in 3.7 367 Name.startswith("sse41.blendp") || // Added in 3.7 368 Name.startswith("avx.blend.p") || // Added in 3.7 369 Name == "avx2.pblendw" || // Added in 3.7 370 Name.startswith("avx2.pblendd.") || // Added in 3.7 371 Name.startswith("avx.vbroadcastf128") || // Added in 4.0 372 Name == "avx2.vbroadcasti128" || // Added in 3.7 373 Name.startswith("avx512.mask.broadcastf") || // Added in 6.0 374 Name.startswith("avx512.mask.broadcasti") || // Added in 6.0 375 Name == "xop.vpcmov" || // Added in 3.8 376 Name == "xop.vpcmov.256" || // Added in 5.0 377 Name.startswith("avx512.mask.move.s") || // Added in 4.0 378 Name.startswith("avx512.cvtmask2") || // Added in 5.0 379 Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0 380 Name.startswith("xop.vprot") || // Added in 8.0 381 Name.startswith("avx512.prol") || // Added in 8.0 382 Name.startswith("avx512.pror") || // Added in 8.0 383 Name.startswith("avx512.mask.prorv.") || // Added in 8.0 384 Name.startswith("avx512.mask.pror.") || // Added in 8.0 385 Name.startswith("avx512.mask.prolv.") || // Added in 8.0 386 Name.startswith("avx512.mask.prol.") || // Added in 8.0 387 Name.startswith("avx512.ptestm") || //Added in 6.0 388 Name.startswith("avx512.ptestnm") || //Added in 6.0 389 Name.startswith("sse2.pavg") || // Added in 6.0 390 Name.startswith("avx2.pavg") || // Added in 6.0 391 Name.startswith("avx512.mask.pavg")) // Added in 6.0 392 return true; 393 394 return false; 395 } 396 397 static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name, 398 Function *&NewFn) { 399 // Only handle intrinsics that start with "x86.". 400 if (!Name.startswith("x86.")) 401 return false; 402 // Remove "x86." prefix. 403 Name = Name.substr(4); 404 405 if (ShouldUpgradeX86Intrinsic(F, Name)) { 406 NewFn = nullptr; 407 return true; 408 } 409 410 if (Name == "rdtscp") { // Added in 8.0 411 // If this intrinsic has 0 operands, it's the new version. 412 if (F->getFunctionType()->getNumParams() == 0) 413 return false; 414 415 rename(F); 416 NewFn = Intrinsic::getDeclaration(F->getParent(), 417 Intrinsic::x86_rdtscp); 418 return true; 419 } 420 421 // SSE4.1 ptest functions may have an old signature. 422 if (Name.startswith("sse41.ptest")) { // Added in 3.2 423 if (Name.substr(11) == "c") 424 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn); 425 if (Name.substr(11) == "z") 426 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn); 427 if (Name.substr(11) == "nzc") 428 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn); 429 } 430 // Several blend and other instructions with masks used the wrong number of 431 // bits. 
  if (Name == "sse41.insertps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
                                            NewFn);
  if (Name == "sse41.dppd") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
                                            NewFn);
  if (Name == "sse41.dpps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
                                            NewFn);
  if (Name == "sse41.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
                                            NewFn);
  if (Name == "avx.dp.ps.256") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
                                            NewFn);
  if (Name == "avx2.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
                                            NewFn);

  // frcz.ss/sd may need to have an argument dropped. Added in 3.2
  if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_ss);
    return true;
  }
  if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_sd);
    return true;
  }
  // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
  if (Name.startswith("xop.vpermil2")) { // Added in 3.9
    auto Idx = F->getFunctionType()->getParamType(2);
    if (Idx->isFPOrFPVectorTy()) {
      rename(F);
      unsigned IdxSize = Idx->getPrimitiveSizeInBits();
      unsigned EltSize = Idx->getScalarSizeInBits();
      Intrinsic::ID Permil2ID;
      if (EltSize == 64 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd;
      else if (EltSize == 32 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2ps;
      else if (EltSize == 64 && IdxSize == 256)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
      else
        Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
      NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
      return true;
    }
  }

  return false;
}

static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vclz")) {
      Type *args[2] = {
          F->arg_begin()->getType(),
          Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType *fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType *fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm." + Name + ".p0i8", F->getParent());
      return true;
    }
    Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vstRegex.match(Name)) {
      static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
                                                Intrinsic::arm_neon_vst2,
                                                Intrinsic::arm_neon_vst3,
                                                Intrinsic::arm_neon_vst4};

      static const Intrinsic::ID StoreLaneInts[] = {
          Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
          Intrinsic::arm_neon_vst4lane
      };

      auto fArgs = F->getFunctionType()->params();
      Type *Tys[] = {fArgs[0], fArgs[1]};
      if (Name.find("lane") == StringRef::npos)
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreInts[fArgs.size() - 3], Tys);
      else
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreLaneInts[fArgs.size() - 5],
                                          Tys);
      return true;
    }
    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::thread_pointer);
      return true;
    }
    if (Name == "x86.seh.recoverfp") {
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::eh_recoverfp);
      return true;
    }
    break;
  }

  case 'c': {
    if (Name.startswith("ctlz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("cttz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
                                        F->arg_begin()->getType());
      return true;
    }
    break;
  }
  case 'd': {
    if (Name == "dbg.value" && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
      return true;
    }
    break;
  }
  case 'i':
  case 'l': {
    bool IsLifetimeStart = Name.startswith("lifetime.start");
    if (IsLifetimeStart || Name.startswith("invariant.start")) {
      Intrinsic::ID ID = IsLifetimeStart ?
          Intrinsic::lifetime_start : Intrinsic::invariant_start;
      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[1]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }

    bool IsLifetimeEnd = Name.startswith("lifetime.end");
    if (IsLifetimeEnd || Name.startswith("invariant.end")) {
      Intrinsic::ID ID = IsLifetimeEnd ?
          Intrinsic::lifetime_end : Intrinsic::invariant_end;

      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }
    if (Name.startswith("invariant.group.barrier")) {
      // Rename invariant.group.barrier to launder.invariant.group.
      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[0]};
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
          Intrinsic::launder_invariant_group, ObjectPtr);
      return true;
    }

    break;
  }
  case 'm': {
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    // Rename gather/scatter intrinsics with no address space overloading
    // to the new overload which includes an address space.
    if (Name.startswith("masked.gather.")) {
      Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_gather, Tys);
        return true;
      }
    }
    if (Name.startswith("masked.scatter.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = {Args[0], Args[1]};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_scatter, Tys);
        return true;
      }
    }
    // Update the memory intrinsics (memcpy/memmove/memset) that have an
    // alignment parameter to embed the alignment as an attribute of
    // the pointer args.
    if (Name.startswith("memcpy.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memmove.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest and len.
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
  case 'n': {
    if (Name.startswith("nvvm.")) {
      Name = Name.substr(5);

      // The following nvvm intrinsics correspond exactly to an LLVM
      // intrinsic.
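      // For example (illustrative), a one-argument llvm.nvvm.brev32 maps to
      // llvm.bitreverse.i32 and llvm.nvvm.popc.i maps to llvm.ctpop.i32,
      // overloading the replacement on the return type.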
      Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
                              .Cases("brev32", "brev64", Intrinsic::bitreverse)
                              .Case("clz.i", Intrinsic::ctlz)
                              .Case("popc.i", Intrinsic::ctpop)
                              .Default(Intrinsic::not_intrinsic);
      if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                          {F->getReturnType()});
        return true;
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom,
      // but not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = StringSwitch<bool>(Name)
                        .Cases("abs.i", "abs.ll", true)
                        .Cases("clz.ll", "popc.ll", "h2f", true)
                        .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
                        .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
                        .Default(false);
      if (Expand) {
        NewFn = nullptr;
        return true;
      }
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->arg_size() == 2 ||
          F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::objectsize, Tys);
        return true;
      }
    }
    break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x':
    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }
  // Remangle our intrinsic since we upgrade the mangling.
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != None) {
    NewFn = Result.getValue();
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  // Nothing to do yet.
  return false;
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // The 256/512-bit version is split into 2/4 16-byte lanes.
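    // Illustrative 128-bit case (Shift == 4, NumElts == 16): the indices
    // become <12, 13, 14, 15, 16, ..., 27>, selecting the last four zero
    // bytes of Res followed by the first twelve bytes of Op, i.e. a left
    // shift by four bytes.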
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // The 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(
      Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have fewer than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}

static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
                                  Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  llvm::VectorType *MaskTy =
      llvm::VectorType::get(Builder.getInt1Ty(),
                            Mask->getType()->getIntegerBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);
  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting, while VALIGN masks the
// immediate, so we need to handle both cases. VALIGN also doesn't have
// 128-bit lanes.
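// Illustrative example: palignr with an immediate of 17 on 128-bit vectors is
// first reduced to a shift of 1 with a zero vector substituted for one input,
// so zero bytes shift in, matching the hardware instruction's behavior.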
static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
  for (unsigned l = 0; l < NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}

static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallInst &CI,
                                          bool ZeroMask, bool IndexForm) {
  Type *Ty = CI.getType();
  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
  unsigned EltWidth = Ty->getScalarSizeInBits();
  bool IsFloat = Ty->isFPOrFPVectorTy();
  Intrinsic::ID IID;
  if (VecWidth == 128 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
  else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_128;
  else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
  else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_128;
  else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
  else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_256;
  else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
  else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_256;
  else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
  else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_512;
  else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
  else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_512;
  else if (VecWidth == 128 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
  else if (VecWidth == 256 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
  else if (VecWidth == 512 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
  else if (VecWidth == 128 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
  else if (VecWidth == 256 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
  else if (VecWidth == 512 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
  else
    llvm_unreachable("Unexpected intrinsic");

  Value *Args[] = { CI.getArgOperand(0), CI.getArgOperand(1),
                    CI.getArgOperand(2) };

  // If this isn't index form we need to swap operand 0 and 1.
  if (!IndexForm)
    std::swap(Args[0], Args[1]);

  Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
                                Args);
  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
                             : Builder.CreateBitCast(CI.getArgOperand(1),
                                                     Ty);
  return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
}

static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
                                            bool IsSigned, bool IsAddition) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getOperand(0);
  Value *Op1 = CI.getOperand(1);

  Intrinsic::ID IID =
      IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
               : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
                               bool IsRotateRight) {
  Type *Ty = CI.getType();
  Value *Src = CI.getArgOperand(0);
  Value *Amt = CI.getArgOperand(1);

  // The amount may be a scalar immediate, in which case create a splat
  // vector. Funnel shift amounts are treated as modulo and types are all
  // power-of-2, so we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallInst &CI, unsigned Imm,
                              bool IsSigned) {
  Type *Ty = CI.getType();
  Value *LHS = CI.getArgOperand(0);
  Value *RHS = CI.getArgOperand(1);

  CmpInst::Predicate Pred;
  switch (Imm) {
  case 0x0:
    Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    break;
  case 0x1:
    Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
    break;
  case 0x2:
    Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    break;
  case 0x3:
    Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
    break;
  case 0x4:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case 0x5:
    Pred = ICmpInst::ICMP_NE;
    break;
  case 0x6:
    return Constant::getNullValue(Ty); // FALSE
  case 0x7:
    return Constant::getAllOnesValue(Ty); // TRUE
  default:
    llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
  }

  Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
  Value *Ext = Builder.CreateSExt(Cmp, Ty);
  return Ext;
}

static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
                                    bool IsShiftRight, bool ZeroMask) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Amt = CI.getArgOperand(2);

  if (IsShiftRight)
    std::swap(Op0, Op1);

  // The amount may be a scalar immediate, in which case create a splat
  // vector. Funnel shift amounts are treated as modulo and types are all
  // power-of-2, so we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});

  unsigned NumArgs = CI.getNumArgOperands();
  if (NumArgs >= 4) { // For masked intrinsics.
    Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
                    ZeroMask     ? ConstantAggregateZero::get(CI.getType()) :
                                   CI.getArgOperand(0);
    Value *Mask = CI.getOperand(NumArgs - 1);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  unsigned Align =
      Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  Type *ValTy = Passthru->getType();
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
  unsigned Align =
      Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(ValTy, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
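  // For example (illustrative): an i8 mask with an <8 x i32> passthru is
  // bitcast directly to <8 x i1>, while a <4 x i32> passthru keeps only the
  // low four mask bits via the shuffle in getX86MaskVec.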
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
}

static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  llvm::Type *Ty = Op0->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
  Value *Neg = Builder.CreateNeg(Op0);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);

  if (CI.getNumArgOperands() == 3)
    Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));

  return Res;
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
  Type *Ty = CI.getType();

  // Arguments have a vXi32 type, so cast to vXi64.
  Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
  Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = Builder.CreateShl(LHS, ShiftAmt);
    LHS = Builder.CreateAShr(LHS, ShiftAmt);
    RHS = Builder.CreateShl(RHS, ShiftAmt);
    RHS = Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = Builder.CreateAnd(LHS, Mask);
    RHS = Builder.CreateAnd(RHS, Mask);
  }

  Value *Res = Builder.CreateMul(LHS, RHS);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

// Apply a mask to a vector of i1s and make sure the result is at least 8 bits
// wide.
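// For example (illustrative), a <4 x i1> compare result is widened with four
// zero lanes to <8 x i1> before being bitcast to i8, the narrowest mask width
// the old intrinsics used.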
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask) {
  unsigned NumElts = Vec->getType()->getVectorNumElements();
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Vec = Builder.CreateShuffleVector(Vec,
                                      Constant::getNullValue(Vec->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
}

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   unsigned CC, bool Signed) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ; break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE; break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
  }

  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);

  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
                                    Intrinsic::ID IID) {
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
  Value *Rep = Builder.CreateCall(Intrin,
                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}

static Value *upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value *A = CI.getArgOperand(0);
  Value *B = CI.getArgOperand(1);
  Value *Src = CI.getArgOperand(2);
  Value *Mask = CI.getArgOperand(3);

  Value *AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
  Value *Cmp = Builder.CreateIsNotNull(AndNode);
  Value *Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
  Value *Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
  Value *Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}

static Value *UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op = CI.getArgOperand(0);
  Type *ReturnOp = CI.getType();
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
}

// Replace intrinsic with unmasked version and a select.
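// Illustrative example: a call to llvm.x86.avx512.mask.pshuf.b.128 becomes a
// call to llvm.x86.ssse3.pshuf.b.128 followed by a select between that result
// and the passthru operand, keyed on the trailing mask argument.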
static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
                                      CallInst &CI, Value *&Rep) {
  Name = Name.substr(12); // Remove avx512.mask.

  unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
  unsigned EltWidth = CI.getType()->getScalarSizeInBits();
  Intrinsic::ID IID;
  if (Name.startswith("max.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_max_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_max_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_max_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_max_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("min.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_min_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_min_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_min_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_min_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pshuf.b.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pshuf_b_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pshuf_b;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pshuf_b_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmul.hr.sw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmul_hr_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulh.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulh_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulh_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulh_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulhu.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulhu_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulhu_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulhu_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddw.d.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmadd_wd;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_wd;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddw_d_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddubs.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_ub_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddubs_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packsswb.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_packsswb_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packsswb;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packsswb_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packssdw.")) {
1416 if (VecWidth == 128) 1417 IID = Intrinsic::x86_sse2_packssdw_128; 1418 else if (VecWidth == 256) 1419 IID = Intrinsic::x86_avx2_packssdw; 1420 else if (VecWidth == 512) 1421 IID = Intrinsic::x86_avx512_packssdw_512; 1422 else 1423 llvm_unreachable("Unexpected intrinsic"); 1424 } else if (Name.startswith("packuswb.")) { 1425 if (VecWidth == 128) 1426 IID = Intrinsic::x86_sse2_packuswb_128; 1427 else if (VecWidth == 256) 1428 IID = Intrinsic::x86_avx2_packuswb; 1429 else if (VecWidth == 512) 1430 IID = Intrinsic::x86_avx512_packuswb_512; 1431 else 1432 llvm_unreachable("Unexpected intrinsic"); 1433 } else if (Name.startswith("packusdw.")) { 1434 if (VecWidth == 128) 1435 IID = Intrinsic::x86_sse41_packusdw; 1436 else if (VecWidth == 256) 1437 IID = Intrinsic::x86_avx2_packusdw; 1438 else if (VecWidth == 512) 1439 IID = Intrinsic::x86_avx512_packusdw_512; 1440 else 1441 llvm_unreachable("Unexpected intrinsic"); 1442 } else if (Name.startswith("vpermilvar.")) { 1443 if (VecWidth == 128 && EltWidth == 32) 1444 IID = Intrinsic::x86_avx_vpermilvar_ps; 1445 else if (VecWidth == 128 && EltWidth == 64) 1446 IID = Intrinsic::x86_avx_vpermilvar_pd; 1447 else if (VecWidth == 256 && EltWidth == 32) 1448 IID = Intrinsic::x86_avx_vpermilvar_ps_256; 1449 else if (VecWidth == 256 && EltWidth == 64) 1450 IID = Intrinsic::x86_avx_vpermilvar_pd_256; 1451 else if (VecWidth == 512 && EltWidth == 32) 1452 IID = Intrinsic::x86_avx512_vpermilvar_ps_512; 1453 else if (VecWidth == 512 && EltWidth == 64) 1454 IID = Intrinsic::x86_avx512_vpermilvar_pd_512; 1455 else 1456 llvm_unreachable("Unexpected intrinsic"); 1457 } else if (Name == "cvtpd2dq.256") { 1458 IID = Intrinsic::x86_avx_cvt_pd2dq_256; 1459 } else if (Name == "cvtpd2ps.256") { 1460 IID = Intrinsic::x86_avx_cvt_pd2_ps_256; 1461 } else if (Name == "cvttpd2dq.256") { 1462 IID = Intrinsic::x86_avx_cvtt_pd2dq_256; 1463 } else if (Name == "cvttps2dq.128") { 1464 IID = Intrinsic::x86_sse2_cvttps2dq; 1465 } else if (Name == "cvttps2dq.256") { 1466 IID = Intrinsic::x86_avx_cvtt_ps2dq_256; 1467 } else if (Name.startswith("permvar.")) { 1468 bool IsFloat = CI.getType()->isFPOrFPVectorTy(); 1469 if (VecWidth == 256 && EltWidth == 32 && IsFloat) 1470 IID = Intrinsic::x86_avx2_permps; 1471 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat) 1472 IID = Intrinsic::x86_avx2_permd; 1473 else if (VecWidth == 256 && EltWidth == 64 && IsFloat) 1474 IID = Intrinsic::x86_avx512_permvar_df_256; 1475 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat) 1476 IID = Intrinsic::x86_avx512_permvar_di_256; 1477 else if (VecWidth == 512 && EltWidth == 32 && IsFloat) 1478 IID = Intrinsic::x86_avx512_permvar_sf_512; 1479 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat) 1480 IID = Intrinsic::x86_avx512_permvar_si_512; 1481 else if (VecWidth == 512 && EltWidth == 64 && IsFloat) 1482 IID = Intrinsic::x86_avx512_permvar_df_512; 1483 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat) 1484 IID = Intrinsic::x86_avx512_permvar_di_512; 1485 else if (VecWidth == 128 && EltWidth == 16) 1486 IID = Intrinsic::x86_avx512_permvar_hi_128; 1487 else if (VecWidth == 256 && EltWidth == 16) 1488 IID = Intrinsic::x86_avx512_permvar_hi_256; 1489 else if (VecWidth == 512 && EltWidth == 16) 1490 IID = Intrinsic::x86_avx512_permvar_hi_512; 1491 else if (VecWidth == 128 && EltWidth == 8) 1492 IID = Intrinsic::x86_avx512_permvar_qi_128; 1493 else if (VecWidth == 256 && EltWidth == 8) 1494 IID = Intrinsic::x86_avx512_permvar_qi_256; 1495 else if (VecWidth == 512 && EltWidth == 8) 1496 IID = 
Intrinsic::x86_avx512_permvar_qi_512; 1497 else 1498 llvm_unreachable("Unexpected intrinsic"); 1499 } else if (Name.startswith("dbpsadbw.")) { 1500 if (VecWidth == 128) 1501 IID = Intrinsic::x86_avx512_dbpsadbw_128; 1502 else if (VecWidth == 256) 1503 IID = Intrinsic::x86_avx512_dbpsadbw_256; 1504 else if (VecWidth == 512) 1505 IID = Intrinsic::x86_avx512_dbpsadbw_512; 1506 else 1507 llvm_unreachable("Unexpected intrinsic"); 1508 } else if (Name.startswith("pmultishift.qb.")) { 1509 if (VecWidth == 128) 1510 IID = Intrinsic::x86_avx512_pmultishift_qb_128; 1511 else if (VecWidth == 256) 1512 IID = Intrinsic::x86_avx512_pmultishift_qb_256; 1513 else if (VecWidth == 512) 1514 IID = Intrinsic::x86_avx512_pmultishift_qb_512; 1515 else 1516 llvm_unreachable("Unexpected intrinsic"); 1517 } else if (Name.startswith("conflict.")) { 1518 if (Name[9] == 'd' && VecWidth == 128) 1519 IID = Intrinsic::x86_avx512_conflict_d_128; 1520 else if (Name[9] == 'd' && VecWidth == 256) 1521 IID = Intrinsic::x86_avx512_conflict_d_256; 1522 else if (Name[9] == 'd' && VecWidth == 512) 1523 IID = Intrinsic::x86_avx512_conflict_d_512; 1524 else if (Name[9] == 'q' && VecWidth == 128) 1525 IID = Intrinsic::x86_avx512_conflict_q_128; 1526 else if (Name[9] == 'q' && VecWidth == 256) 1527 IID = Intrinsic::x86_avx512_conflict_q_256; 1528 else if (Name[9] == 'q' && VecWidth == 512) 1529 IID = Intrinsic::x86_avx512_conflict_q_512; 1530 else 1531 llvm_unreachable("Unexpected intrinsic"); 1532 } else 1533 return false; 1534 1535 SmallVector<Value *, 4> Args(CI.arg_operands().begin(), 1536 CI.arg_operands().end()); 1537 Args.pop_back(); 1538 Args.pop_back(); 1539 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID), 1540 Args); 1541 unsigned NumArgs = CI.getNumArgOperands(); 1542 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep, 1543 CI.getArgOperand(NumArgs - 2)); 1544 return true; 1545 } 1546 1547 /// Upgrade comment in call to inline asm that represents an objc retain release 1548 /// marker. 1549 void llvm::UpgradeInlineAsmString(std::string *AsmStr) { 1550 size_t Pos; 1551 if (AsmStr->find("mov\tfp") == 0 && 1552 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos && 1553 (Pos = AsmStr->find("# marker")) != std::string::npos) { 1554 AsmStr->replace(Pos, 1, ";"); 1555 } 1556 return; 1557 } 1558 1559 /// Upgrade a call to an old intrinsic. All argument and return casting must be 1560 /// provided to seamlessly integrate with existing context. 1561 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { 1562 Function *F = CI->getCalledFunction(); 1563 LLVMContext &C = CI->getContext(); 1564 IRBuilder<> Builder(C); 1565 Builder.SetInsertPoint(CI->getParent(), CI->getIterator()); 1566 1567 assert(F && "Intrinsic call is not direct?"); 1568 1569 if (!NewFn) { 1570 // Get the Function's name. 
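// The mandatory "llvm." prefix, and the "x86."/"nvvm." namespace prefix
// when present, are stripped below so the matches operate on the bare
// intrinsic name, e.g. "llvm.x86.sse2.storel.dq" -> "sse2.storel.dq".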
1571 StringRef Name = F->getName(); 1572 1573 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'"); 1574 Name = Name.substr(5); 1575 1576 bool IsX86 = Name.startswith("x86."); 1577 if (IsX86) 1578 Name = Name.substr(4); 1579 bool IsNVVM = Name.startswith("nvvm."); 1580 if (IsNVVM) 1581 Name = Name.substr(5); 1582 1583 if (IsX86 && Name.startswith("sse4a.movnt.")) { 1584 Module *M = F->getParent(); 1585 SmallVector<Metadata *, 1> Elts; 1586 Elts.push_back( 1587 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1588 MDNode *Node = MDNode::get(C, Elts); 1589 1590 Value *Arg0 = CI->getArgOperand(0); 1591 Value *Arg1 = CI->getArgOperand(1); 1592 1593 // Nontemporal (unaligned) store of the 0'th element of the float/double 1594 // vector. 1595 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType(); 1596 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy); 1597 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast"); 1598 Value *Extract = 1599 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement"); 1600 1601 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1); 1602 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1603 1604 // Remove intrinsic. 1605 CI->eraseFromParent(); 1606 return; 1607 } 1608 1609 if (IsX86 && (Name.startswith("avx.movnt.") || 1610 Name.startswith("avx512.storent."))) { 1611 Module *M = F->getParent(); 1612 SmallVector<Metadata *, 1> Elts; 1613 Elts.push_back( 1614 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1615 MDNode *Node = MDNode::get(C, Elts); 1616 1617 Value *Arg0 = CI->getArgOperand(0); 1618 Value *Arg1 = CI->getArgOperand(1); 1619 1620 // Convert the type of the pointer to a pointer to the stored type. 1621 Value *BC = Builder.CreateBitCast(Arg0, 1622 PointerType::getUnqual(Arg1->getType()), 1623 "cast"); 1624 VectorType *VTy = cast<VectorType>(Arg1->getType()); 1625 StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, 1626 VTy->getBitWidth() / 8); 1627 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1628 1629 // Remove intrinsic. 1630 CI->eraseFromParent(); 1631 return; 1632 } 1633 1634 if (IsX86 && Name == "sse2.storel.dq") { 1635 Value *Arg0 = CI->getArgOperand(0); 1636 Value *Arg1 = CI->getArgOperand(1); 1637 1638 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 1639 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 1640 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0); 1641 Value *BC = Builder.CreateBitCast(Arg0, 1642 PointerType::getUnqual(Elt->getType()), 1643 "cast"); 1644 Builder.CreateAlignedStore(Elt, BC, 1); 1645 1646 // Remove intrinsic. 1647 CI->eraseFromParent(); 1648 return; 1649 } 1650 1651 if (IsX86 && (Name.startswith("sse.storeu.") || 1652 Name.startswith("sse2.storeu.") || 1653 Name.startswith("avx.storeu."))) { 1654 Value *Arg0 = CI->getArgOperand(0); 1655 Value *Arg1 = CI->getArgOperand(1); 1656 1657 Arg0 = Builder.CreateBitCast(Arg0, 1658 PointerType::getUnqual(Arg1->getType()), 1659 "cast"); 1660 Builder.CreateAlignedStore(Arg1, Arg0, 1); 1661 1662 // Remove intrinsic. 1663 CI->eraseFromParent(); 1664 return; 1665 } 1666 1667 if (IsX86 && Name == "avx512.mask.store.ss") { 1668 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1)); 1669 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1), 1670 Mask, false); 1671 1672 // Remove intrinsic. 
1673 CI->eraseFromParent();
1674 return;
1675 }
1676
1677 if (IsX86 && (Name.startswith("avx512.mask.store"))) {
1678 // "avx512.mask.storeu." or "avx512.mask.store."
1679 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
1680 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
1681 CI->getArgOperand(2), Aligned);
1682
1683 // Remove intrinsic.
1684 CI->eraseFromParent();
1685 return;
1686 }
1687
1688 Value *Rep;
1689 // Upgrade packed integer vector compare intrinsics to compare instructions.
1690 if (IsX86 && (Name.startswith("sse2.pcmp") ||
1691 Name.startswith("avx2.pcmp"))) {
1692 // "sse2.pcmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
1693 bool CmpEq = Name[9] == 'e';
1694 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
1695 CI->getArgOperand(0), CI->getArgOperand(1));
1696 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
1697 } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
1698 Type *ExtTy = Type::getInt32Ty(C);
1699 if (CI->getOperand(0)->getType()->isIntegerTy(8))
1700 ExtTy = Type::getInt64Ty(C);
1701 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
1702 ExtTy->getPrimitiveSizeInBits();
1703 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
1704 Rep = Builder.CreateVectorSplat(NumElts, Rep);
1705 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
1706 Name == "sse2.sqrt.sd")) {
1707 Value *Vec = CI->getArgOperand(0);
1708 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
1709 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
1710 Intrinsic::sqrt, Elt0->getType());
1711 Elt0 = Builder.CreateCall(Intr, Elt0);
1712 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
1713 } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
1714 Name.startswith("sse2.sqrt.p") ||
1715 Name.startswith("sse.sqrt.p"))) {
1716 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1717 Intrinsic::sqrt,
1718 CI->getType()),
1719 {CI->getArgOperand(0)});
1720 } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
1721 if (CI->getNumArgOperands() == 4 &&
1722 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
1723 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
1724 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
1725 : Intrinsic::x86_avx512_sqrt_pd_512;
1726
1727 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
1728 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
1729 IID), Args);
1730 } else {
1731 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1732 Intrinsic::sqrt,
1733 CI->getType()),
1734 {CI->getArgOperand(0)});
1735 }
1736 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1737 CI->getArgOperand(1));
1738 } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
1739 Name.startswith("avx512.ptestnm"))) {
1740 Value *Op0 = CI->getArgOperand(0);
1741 Value *Op1 = CI->getArgOperand(1);
1742 Value *Mask = CI->getArgOperand(2);
1743 Rep = Builder.CreateAnd(Op0, Op1);
1744 llvm::Type *Ty = Op0->getType();
1745 Value *Zero = llvm::Constant::getNullValue(Ty);
1746 ICmpInst::Predicate Pred =
1747 Name.startswith("avx512.ptestm") ?
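// ptestm sets a mask bit where (Op0 & Op1) is nonzero, ptestnm where it
// is zero.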
ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ; 1748 Rep = Builder.CreateICmp(Pred, Rep, Zero); 1749 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask); 1750 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){ 1751 unsigned NumElts = 1752 CI->getArgOperand(1)->getType()->getVectorNumElements(); 1753 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0)); 1754 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1755 CI->getArgOperand(1)); 1756 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) { 1757 unsigned NumElts = CI->getType()->getScalarSizeInBits(); 1758 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts); 1759 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts); 1760 uint32_t Indices[64]; 1761 for (unsigned i = 0; i != NumElts; ++i) 1762 Indices[i] = i; 1763 1764 // First extract half of each vector. This gives better codegen than 1765 // doing it in a single shuffle. 1766 LHS = Builder.CreateShuffleVector(LHS, LHS, 1767 makeArrayRef(Indices, NumElts / 2)); 1768 RHS = Builder.CreateShuffleVector(RHS, RHS, 1769 makeArrayRef(Indices, NumElts / 2)); 1770 // Concat the vectors. 1771 // NOTE: Operands have to be swapped to match intrinsic definition. 1772 Rep = Builder.CreateShuffleVector(RHS, LHS, 1773 makeArrayRef(Indices, NumElts)); 1774 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1775 } else if (IsX86 && Name == "avx512.kand.w") { 1776 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1777 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1778 Rep = Builder.CreateAnd(LHS, RHS); 1779 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1780 } else if (IsX86 && Name == "avx512.kandn.w") { 1781 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1782 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1783 LHS = Builder.CreateNot(LHS); 1784 Rep = Builder.CreateAnd(LHS, RHS); 1785 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1786 } else if (IsX86 && Name == "avx512.kor.w") { 1787 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1788 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1789 Rep = Builder.CreateOr(LHS, RHS); 1790 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1791 } else if (IsX86 && Name == "avx512.kxor.w") { 1792 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1793 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1794 Rep = Builder.CreateXor(LHS, RHS); 1795 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1796 } else if (IsX86 && Name == "avx512.kxnor.w") { 1797 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1798 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1799 LHS = Builder.CreateNot(LHS); 1800 Rep = Builder.CreateXor(LHS, RHS); 1801 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1802 } else if (IsX86 && Name == "avx512.knot.w") { 1803 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1804 Rep = Builder.CreateNot(Rep); 1805 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1806 } else if (IsX86 && 1807 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) { 1808 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1809 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1810 Rep = Builder.CreateOr(LHS, RHS); 1811 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty()); 1812 Value *C; 1813 if (Name[14] == 'c') 1814 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty()); 1815 else 1816 C = ConstantInt::getNullValue(Builder.getInt16Ty()); 1817 Rep = 
Builder.CreateICmpEQ(Rep, C); 1818 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty()); 1819 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" || 1820 Name == "sse.sub.ss" || Name == "sse2.sub.sd" || 1821 Name == "sse.mul.ss" || Name == "sse2.mul.sd" || 1822 Name == "sse.div.ss" || Name == "sse2.div.sd")) { 1823 Type *I32Ty = Type::getInt32Ty(C); 1824 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1825 ConstantInt::get(I32Ty, 0)); 1826 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1827 ConstantInt::get(I32Ty, 0)); 1828 Value *EltOp; 1829 if (Name.contains(".add.")) 1830 EltOp = Builder.CreateFAdd(Elt0, Elt1); 1831 else if (Name.contains(".sub.")) 1832 EltOp = Builder.CreateFSub(Elt0, Elt1); 1833 else if (Name.contains(".mul.")) 1834 EltOp = Builder.CreateFMul(Elt0, Elt1); 1835 else 1836 EltOp = Builder.CreateFDiv(Elt0, Elt1); 1837 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp, 1838 ConstantInt::get(I32Ty, 0)); 1839 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) { 1840 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt." 1841 bool CmpEq = Name[16] == 'e'; 1842 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true); 1843 } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) { 1844 Type *OpTy = CI->getArgOperand(0)->getType(); 1845 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1846 Intrinsic::ID IID; 1847 switch (VecWidth) { 1848 default: llvm_unreachable("Unexpected intrinsic"); 1849 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break; 1850 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break; 1851 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break; 1852 } 1853 1854 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1855 { CI->getOperand(0), CI->getArgOperand(1) }); 1856 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1857 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) { 1858 Type *OpTy = CI->getArgOperand(0)->getType(); 1859 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1860 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1861 Intrinsic::ID IID; 1862 if (VecWidth == 128 && EltWidth == 32) 1863 IID = Intrinsic::x86_avx512_fpclass_ps_128; 1864 else if (VecWidth == 256 && EltWidth == 32) 1865 IID = Intrinsic::x86_avx512_fpclass_ps_256; 1866 else if (VecWidth == 512 && EltWidth == 32) 1867 IID = Intrinsic::x86_avx512_fpclass_ps_512; 1868 else if (VecWidth == 128 && EltWidth == 64) 1869 IID = Intrinsic::x86_avx512_fpclass_pd_128; 1870 else if (VecWidth == 256 && EltWidth == 64) 1871 IID = Intrinsic::x86_avx512_fpclass_pd_256; 1872 else if (VecWidth == 512 && EltWidth == 64) 1873 IID = Intrinsic::x86_avx512_fpclass_pd_512; 1874 else 1875 llvm_unreachable("Unexpected intrinsic"); 1876 1877 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1878 { CI->getOperand(0), CI->getArgOperand(1) }); 1879 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1880 } else if (IsX86 && Name.startswith("avx512.mask.cmp.p")) { 1881 Type *OpTy = CI->getArgOperand(0)->getType(); 1882 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1883 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1884 Intrinsic::ID IID; 1885 if (VecWidth == 128 && EltWidth == 32) 1886 IID = Intrinsic::x86_avx512_cmp_ps_128; 1887 else if (VecWidth == 256 && EltWidth == 32) 1888 IID = Intrinsic::x86_avx512_cmp_ps_256; 1889 else if (VecWidth == 512 && EltWidth == 32) 1890 IID = Intrinsic::x86_avx512_cmp_ps_512; 1891 
else if (VecWidth == 128 && EltWidth == 64) 1892 IID = Intrinsic::x86_avx512_cmp_pd_128; 1893 else if (VecWidth == 256 && EltWidth == 64) 1894 IID = Intrinsic::x86_avx512_cmp_pd_256; 1895 else if (VecWidth == 512 && EltWidth == 64) 1896 IID = Intrinsic::x86_avx512_cmp_pd_512; 1897 else 1898 llvm_unreachable("Unexpected intrinsic"); 1899 1900 SmallVector<Value *, 4> Args; 1901 Args.push_back(CI->getArgOperand(0)); 1902 Args.push_back(CI->getArgOperand(1)); 1903 Args.push_back(CI->getArgOperand(2)); 1904 if (CI->getNumArgOperands() == 5) 1905 Args.push_back(CI->getArgOperand(4)); 1906 1907 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1908 Args); 1909 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(3)); 1910 } else if (IsX86 && Name.startswith("avx512.mask.cmp.") && 1911 Name[16] != 'p') { 1912 // Integer compare intrinsics. 1913 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1914 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true); 1915 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) { 1916 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1917 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false); 1918 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") || 1919 Name.startswith("avx512.cvtw2mask.") || 1920 Name.startswith("avx512.cvtd2mask.") || 1921 Name.startswith("avx512.cvtq2mask."))) { 1922 Value *Op = CI->getArgOperand(0); 1923 Value *Zero = llvm::Constant::getNullValue(Op->getType()); 1924 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero); 1925 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr); 1926 } else if(IsX86 && (Name == "ssse3.pabs.b.128" || 1927 Name == "ssse3.pabs.w.128" || 1928 Name == "ssse3.pabs.d.128" || 1929 Name.startswith("avx2.pabs") || 1930 Name.startswith("avx512.mask.pabs"))) { 1931 Rep = upgradeAbs(Builder, *CI); 1932 } else if (IsX86 && (Name == "sse41.pmaxsb" || 1933 Name == "sse2.pmaxs.w" || 1934 Name == "sse41.pmaxsd" || 1935 Name.startswith("avx2.pmaxs") || 1936 Name.startswith("avx512.mask.pmaxs"))) { 1937 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT); 1938 } else if (IsX86 && (Name == "sse2.pmaxu.b" || 1939 Name == "sse41.pmaxuw" || 1940 Name == "sse41.pmaxud" || 1941 Name.startswith("avx2.pmaxu") || 1942 Name.startswith("avx512.mask.pmaxu"))) { 1943 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT); 1944 } else if (IsX86 && (Name == "sse41.pminsb" || 1945 Name == "sse2.pmins.w" || 1946 Name == "sse41.pminsd" || 1947 Name.startswith("avx2.pmins") || 1948 Name.startswith("avx512.mask.pmins"))) { 1949 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT); 1950 } else if (IsX86 && (Name == "sse2.pminu.b" || 1951 Name == "sse41.pminuw" || 1952 Name == "sse41.pminud" || 1953 Name.startswith("avx2.pminu") || 1954 Name.startswith("avx512.mask.pminu"))) { 1955 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT); 1956 } else if (IsX86 && (Name == "sse2.pmulu.dq" || 1957 Name == "avx2.pmulu.dq" || 1958 Name == "avx512.pmulu.dq.512" || 1959 Name.startswith("avx512.mask.pmulu.dq."))) { 1960 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false); 1961 } else if (IsX86 && (Name == "sse41.pmuldq" || 1962 Name == "avx2.pmul.dq" || 1963 Name == "avx512.pmul.dq.512" || 1964 Name.startswith("avx512.mask.pmul.dq."))) { 1965 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true); 1966 } else if (IsX86 && (Name == "sse.cvtsi2ss" || 1967 Name == "sse2.cvtsi2sd" || 1968 Name == "sse.cvtsi642ss" || 1969 Name == "sse2.cvtsi642sd")) { 1970 Rep = 
Builder.CreateSIToFP(CI->getArgOperand(1), 1971 CI->getType()->getVectorElementType()); 1972 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1973 } else if (IsX86 && Name == "avx512.cvtusi2sd") { 1974 Rep = Builder.CreateUIToFP(CI->getArgOperand(1), 1975 CI->getType()->getVectorElementType()); 1976 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1977 } else if (IsX86 && Name == "sse2.cvtss2sd") { 1978 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0); 1979 Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType()); 1980 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1981 } else if (IsX86 && (Name == "sse2.cvtdq2pd" || 1982 Name == "sse2.cvtdq2ps" || 1983 Name == "avx.cvtdq2.pd.256" || 1984 Name == "avx.cvtdq2.ps.256" || 1985 Name.startswith("avx512.mask.cvtdq2pd.") || 1986 Name.startswith("avx512.mask.cvtudq2pd.") || 1987 Name.startswith("avx512.mask.cvtdq2ps.") || 1988 Name.startswith("avx512.mask.cvtudq2ps.") || 1989 Name.startswith("avx512.mask.cvtqq2pd.") || 1990 Name.startswith("avx512.mask.cvtuqq2pd.") || 1991 Name == "avx512.mask.cvtqq2ps.256" || 1992 Name == "avx512.mask.cvtqq2ps.512" || 1993 Name == "avx512.mask.cvtuqq2ps.256" || 1994 Name == "avx512.mask.cvtuqq2ps.512" || 1995 Name == "sse2.cvtps2pd" || 1996 Name == "avx.cvt.ps2.pd.256" || 1997 Name == "avx512.mask.cvtps2pd.128" || 1998 Name == "avx512.mask.cvtps2pd.256")) { 1999 Type *DstTy = CI->getType(); 2000 Rep = CI->getArgOperand(0); 2001 Type *SrcTy = Rep->getType(); 2002 2003 unsigned NumDstElts = DstTy->getVectorNumElements(); 2004 if (NumDstElts < SrcTy->getVectorNumElements()) { 2005 assert(NumDstElts == 2 && "Unexpected vector size"); 2006 uint32_t ShuffleMask[2] = { 0, 1 }; 2007 Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); 2008 } 2009 2010 bool IsPS2PD = SrcTy->getVectorElementType()->isFloatTy(); 2011 bool IsUnsigned = (StringRef::npos != Name.find("cvtu")); 2012 if (IsPS2PD) 2013 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd"); 2014 else if (CI->getNumArgOperands() == 4 && 2015 (!isa<ConstantInt>(CI->getArgOperand(3)) || 2016 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) { 2017 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round 2018 : Intrinsic::x86_avx512_sitofp_round; 2019 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID, 2020 { DstTy, SrcTy }); 2021 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) }); 2022 } else { 2023 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt") 2024 : Builder.CreateSIToFP(Rep, DstTy, "cvt"); 2025 } 2026 2027 if (CI->getNumArgOperands() >= 3) 2028 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2029 CI->getArgOperand(1)); 2030 } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) { 2031 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 2032 CI->getArgOperand(1), CI->getArgOperand(2), 2033 /*Aligned*/false); 2034 } else if (IsX86 && (Name.startswith("avx512.mask.load."))) { 2035 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 2036 CI->getArgOperand(1),CI->getArgOperand(2), 2037 /*Aligned*/true); 2038 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) { 2039 Type *ResultTy = CI->getType(); 2040 Type *PtrTy = ResultTy->getVectorElementType(); 2041 2042 // Cast the pointer to element type. 
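// (@llvm.masked.expandload takes a pointer to the element type rather
// than to the vector type, hence the cast below.)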
2043 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2044 llvm::PointerType::getUnqual(PtrTy)); 2045 2046 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2047 ResultTy->getVectorNumElements()); 2048 2049 Function *ELd = Intrinsic::getDeclaration(F->getParent(), 2050 Intrinsic::masked_expandload, 2051 ResultTy); 2052 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) }); 2053 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) { 2054 Type *ResultTy = CI->getArgOperand(1)->getType(); 2055 Type *PtrTy = ResultTy->getVectorElementType(); 2056 2057 // Cast the pointer to element type. 2058 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2059 llvm::PointerType::getUnqual(PtrTy)); 2060 2061 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2062 ResultTy->getVectorNumElements()); 2063 2064 Function *CSt = Intrinsic::getDeclaration(F->getParent(), 2065 Intrinsic::masked_compressstore, 2066 ResultTy); 2067 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec }); 2068 } else if (IsX86 && (Name.startswith("avx512.mask.compress.") || 2069 Name.startswith("avx512.mask.expand."))) { 2070 Type *ResultTy = CI->getType(); 2071 2072 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2073 ResultTy->getVectorNumElements()); 2074 2075 bool IsCompress = Name[12] == 'c'; 2076 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress 2077 : Intrinsic::x86_avx512_mask_expand; 2078 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy); 2079 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1), 2080 MaskVec }); 2081 } else if (IsX86 && Name.startswith("xop.vpcom")) { 2082 bool IsSigned; 2083 if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") || 2084 Name.endswith("uq")) 2085 IsSigned = false; 2086 else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") || 2087 Name.endswith("q")) 2088 IsSigned = true; 2089 else 2090 llvm_unreachable("Unknown suffix"); 2091 2092 unsigned Imm; 2093 if (CI->getNumArgOperands() == 3) { 2094 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2095 } else { 2096 Name = Name.substr(9); // strip off "xop.vpcom" 2097 if (Name.startswith("lt")) 2098 Imm = 0; 2099 else if (Name.startswith("le")) 2100 Imm = 1; 2101 else if (Name.startswith("gt")) 2102 Imm = 2; 2103 else if (Name.startswith("ge")) 2104 Imm = 3; 2105 else if (Name.startswith("eq")) 2106 Imm = 4; 2107 else if (Name.startswith("ne")) 2108 Imm = 5; 2109 else if (Name.startswith("false")) 2110 Imm = 6; 2111 else if (Name.startswith("true")) 2112 Imm = 7; 2113 else 2114 llvm_unreachable("Unknown condition"); 2115 } 2116 2117 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned); 2118 } else if (IsX86 && Name.startswith("xop.vpcmov")) { 2119 Value *Sel = CI->getArgOperand(2); 2120 Value *NotSel = Builder.CreateNot(Sel); 2121 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel); 2122 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel); 2123 Rep = Builder.CreateOr(Sel0, Sel1); 2124 } else if (IsX86 && (Name.startswith("xop.vprot") || 2125 Name.startswith("avx512.prol") || 2126 Name.startswith("avx512.mask.prol"))) { 2127 Rep = upgradeX86Rotate(Builder, *CI, false); 2128 } else if (IsX86 && (Name.startswith("avx512.pror") || 2129 Name.startswith("avx512.mask.pror"))) { 2130 Rep = upgradeX86Rotate(Builder, *CI, true); 2131 } else if (IsX86 && (Name.startswith("avx512.vpshld.") || 2132 Name.startswith("avx512.mask.vpshld") || 2133 
Name.startswith("avx512.maskz.vpshld"))) { 2134 bool ZeroMask = Name[11] == 'z'; 2135 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask); 2136 } else if (IsX86 && (Name.startswith("avx512.vpshrd.") || 2137 Name.startswith("avx512.mask.vpshrd") || 2138 Name.startswith("avx512.maskz.vpshrd"))) { 2139 bool ZeroMask = Name[11] == 'z'; 2140 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask); 2141 } else if (IsX86 && Name == "sse42.crc32.64.8") { 2142 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(), 2143 Intrinsic::x86_sse42_crc32_32_8); 2144 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C)); 2145 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)}); 2146 Rep = Builder.CreateZExt(Rep, CI->getType(), ""); 2147 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") || 2148 Name.startswith("avx512.vbroadcast.s"))) { 2149 // Replace broadcasts with a series of insertelements. 2150 Type *VecTy = CI->getType(); 2151 Type *EltTy = VecTy->getVectorElementType(); 2152 unsigned EltNum = VecTy->getVectorNumElements(); 2153 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0), 2154 EltTy->getPointerTo()); 2155 Value *Load = Builder.CreateLoad(EltTy, Cast); 2156 Type *I32Ty = Type::getInt32Ty(C); 2157 Rep = UndefValue::get(VecTy); 2158 for (unsigned I = 0; I < EltNum; ++I) 2159 Rep = Builder.CreateInsertElement(Rep, Load, 2160 ConstantInt::get(I32Ty, I)); 2161 } else if (IsX86 && (Name.startswith("sse41.pmovsx") || 2162 Name.startswith("sse41.pmovzx") || 2163 Name.startswith("avx2.pmovsx") || 2164 Name.startswith("avx2.pmovzx") || 2165 Name.startswith("avx512.mask.pmovsx") || 2166 Name.startswith("avx512.mask.pmovzx"))) { 2167 VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType()); 2168 VectorType *DstTy = cast<VectorType>(CI->getType()); 2169 unsigned NumDstElts = DstTy->getNumElements(); 2170 2171 // Extract a subvector of the first NumDstElts lanes and sign/zero extend. 2172 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 2173 for (unsigned i = 0; i != NumDstElts; ++i) 2174 ShuffleMask[i] = i; 2175 2176 Value *SV = Builder.CreateShuffleVector( 2177 CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask); 2178 2179 bool DoSext = (StringRef::npos != Name.find("pmovsx")); 2180 Rep = DoSext ? Builder.CreateSExt(SV, DstTy) 2181 : Builder.CreateZExt(SV, DstTy); 2182 // If there are 3 arguments, it's a masked intrinsic so we need a select. 2183 if (CI->getNumArgOperands() == 3) 2184 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2185 CI->getArgOperand(1)); 2186 } else if (Name == "avx512.mask.pmov.qd.256" || 2187 Name == "avx512.mask.pmov.qd.512" || 2188 Name == "avx512.mask.pmov.wb.256" || 2189 Name == "avx512.mask.pmov.wb.512") { 2190 Type *Ty = CI->getArgOperand(1)->getType(); 2191 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty); 2192 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2193 CI->getArgOperand(1)); 2194 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") || 2195 Name == "avx2.vbroadcasti128")) { 2196 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle. 
2197 Type *EltTy = CI->getType()->getVectorElementType(); 2198 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits(); 2199 Type *VT = VectorType::get(EltTy, NumSrcElts); 2200 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0), 2201 PointerType::getUnqual(VT)); 2202 Value *Load = Builder.CreateAlignedLoad(VT, Op, 1); 2203 if (NumSrcElts == 2) 2204 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2205 { 0, 1, 0, 1 }); 2206 else 2207 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2208 { 0, 1, 2, 3, 0, 1, 2, 3 }); 2209 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") || 2210 Name.startswith("avx512.mask.shuf.f"))) { 2211 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2212 Type *VT = CI->getType(); 2213 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128; 2214 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits(); 2215 unsigned ControlBitsMask = NumLanes - 1; 2216 unsigned NumControlBits = NumLanes / 2; 2217 SmallVector<uint32_t, 8> ShuffleMask(0); 2218 2219 for (unsigned l = 0; l != NumLanes; ++l) { 2220 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask; 2221 // We actually need the other source. 2222 if (l >= NumLanes / 2) 2223 LaneMask += NumLanes; 2224 for (unsigned i = 0; i != NumElementsInLane; ++i) 2225 ShuffleMask.push_back(LaneMask * NumElementsInLane + i); 2226 } 2227 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 2228 CI->getArgOperand(1), ShuffleMask); 2229 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2230 CI->getArgOperand(3)); 2231 }else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") || 2232 Name.startswith("avx512.mask.broadcasti"))) { 2233 unsigned NumSrcElts = 2234 CI->getArgOperand(0)->getType()->getVectorNumElements(); 2235 unsigned NumDstElts = CI->getType()->getVectorNumElements(); 2236 2237 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 2238 for (unsigned i = 0; i != NumDstElts; ++i) 2239 ShuffleMask[i] = i % NumSrcElts; 2240 2241 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 2242 CI->getArgOperand(0), 2243 ShuffleMask); 2244 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2245 CI->getArgOperand(1)); 2246 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") || 2247 Name.startswith("avx2.vbroadcast") || 2248 Name.startswith("avx512.pbroadcast") || 2249 Name.startswith("avx512.mask.broadcast.s"))) { 2250 // Replace vp?broadcasts with a vector shuffle. 
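// An all-zeros shuffle mask replicates element 0 of the source into every
// result lane, e.g. <i32 0, i32 0, i32 0, i32 0> for a v4i32 broadcast.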
2251 Value *Op = CI->getArgOperand(0); 2252 unsigned NumElts = CI->getType()->getVectorNumElements(); 2253 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts); 2254 Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()), 2255 Constant::getNullValue(MaskTy)); 2256 2257 if (CI->getNumArgOperands() == 3) 2258 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2259 CI->getArgOperand(1)); 2260 } else if (IsX86 && (Name.startswith("sse2.padds.") || 2261 Name.startswith("sse2.psubs.") || 2262 Name.startswith("avx2.padds.") || 2263 Name.startswith("avx2.psubs.") || 2264 Name.startswith("avx512.padds.") || 2265 Name.startswith("avx512.psubs.") || 2266 Name.startswith("avx512.mask.padds.") || 2267 Name.startswith("avx512.mask.psubs."))) { 2268 bool IsAdd = Name.contains(".padds"); 2269 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, true, IsAdd); 2270 } else if (IsX86 && (Name.startswith("sse2.paddus.") || 2271 Name.startswith("sse2.psubus.") || 2272 Name.startswith("avx2.paddus.") || 2273 Name.startswith("avx2.psubus.") || 2274 Name.startswith("avx512.mask.paddus.") || 2275 Name.startswith("avx512.mask.psubus."))) { 2276 bool IsAdd = Name.contains(".paddus"); 2277 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, false, IsAdd); 2278 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) { 2279 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2280 CI->getArgOperand(1), 2281 CI->getArgOperand(2), 2282 CI->getArgOperand(3), 2283 CI->getArgOperand(4), 2284 false); 2285 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) { 2286 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2287 CI->getArgOperand(1), 2288 CI->getArgOperand(2), 2289 CI->getArgOperand(3), 2290 CI->getArgOperand(4), 2291 true); 2292 } else if (IsX86 && (Name == "sse2.psll.dq" || 2293 Name == "avx2.psll.dq")) { 2294 // 128/256-bit shift left specified in bits. 2295 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2296 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), 2297 Shift / 8); // Shift is in bits. 2298 } else if (IsX86 && (Name == "sse2.psrl.dq" || 2299 Name == "avx2.psrl.dq")) { 2300 // 128/256-bit shift right specified in bits. 2301 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2302 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), 2303 Shift / 8); // Shift is in bits. 2304 } else if (IsX86 && (Name == "sse2.psll.dq.bs" || 2305 Name == "avx2.psll.dq.bs" || 2306 Name == "avx512.psll.dq.512")) { 2307 // 128/256/512-bit shift left specified in bytes. 2308 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2309 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift); 2310 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" || 2311 Name == "avx2.psrl.dq.bs" || 2312 Name == "avx512.psrl.dq.512")) { 2313 // 128/256/512-bit shift right specified in bytes. 
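// Unlike the bit-count variants above, the immediate is already a byte
// count, so no division by 8 is needed.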
2314 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2315 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2316 } else if (IsX86 && (Name == "sse41.pblendw" ||
2317 Name.startswith("sse41.blendp") ||
2318 Name.startswith("avx.blend.p") ||
2319 Name == "avx2.pblendw" ||
2320 Name.startswith("avx2.pblendd."))) {
2321 Value *Op0 = CI->getArgOperand(0);
2322 Value *Op1 = CI->getArgOperand(1);
2323 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2324 VectorType *VecTy = cast<VectorType>(CI->getType());
2325 unsigned NumElts = VecTy->getNumElements();
2326
2327 SmallVector<uint32_t, 16> Idxs(NumElts);
2328 for (unsigned i = 0; i != NumElts; ++i)
2329 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
2330
2331 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2332 } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
2333 Name == "avx2.vinserti128" ||
2334 Name.startswith("avx512.mask.insert"))) {
2335 Value *Op0 = CI->getArgOperand(0);
2336 Value *Op1 = CI->getArgOperand(1);
2337 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2338 unsigned DstNumElts = CI->getType()->getVectorNumElements();
2339 unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
2340 unsigned Scale = DstNumElts / SrcNumElts;
2341
2342 // Mask off the high bits of the immediate value; hardware ignores those.
2343 Imm = Imm % Scale;
2344
2345 // Extend the second operand into a vector the size of the destination.
2346 Value *UndefV = UndefValue::get(Op1->getType());
2347 SmallVector<uint32_t, 8> Idxs(DstNumElts);
2348 for (unsigned i = 0; i != SrcNumElts; ++i)
2349 Idxs[i] = i;
2350 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2351 Idxs[i] = SrcNumElts;
2352 Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);
2353
2354 // Insert the second operand into the first operand.
2355
2356 // Note that there is no guarantee that instruction lowering will actually
2357 // produce a vinsertf128 instruction for the created shuffles. In
2358 // particular, the 0 immediate case involves no lane changes, so it can
2359 // be handled as a blend.
2360
2361 // Example of shuffle mask for 32-bit elements:
2362 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2363 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
2364
2365 // First fill with identity mask.
2366 for (unsigned i = 0; i != DstNumElts; ++i)
2367 Idxs[i] = i;
2368 // Then replace the elements where we need to insert.
2369 for (unsigned i = 0; i != SrcNumElts; ++i)
2370 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2371 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2372
2373 // If the intrinsic has a mask operand, handle that.
2374 if (CI->getNumArgOperands() == 5)
2375 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2376 CI->getArgOperand(3));
2377 } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
2378 Name == "avx2.vextracti128" ||
2379 Name.startswith("avx512.mask.vextract"))) {
2380 Value *Op0 = CI->getArgOperand(0);
2381 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2382 unsigned DstNumElts = CI->getType()->getVectorNumElements();
2383 unsigned SrcNumElts = Op0->getType()->getVectorNumElements();
2384 unsigned Scale = SrcNumElts / DstNumElts;
2385
2386 // Mask off the high bits of the immediate value; hardware ignores those.
2387 Imm = Imm % Scale;
2388
2389 // Get indexes for the subvector of the input vector.
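// e.g. extracting the upper 128-bit half of a <8 x i32> (Imm = 1) uses
// the mask <i32 4, i32 5, i32 6, i32 7>.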
2390 SmallVector<uint32_t, 8> Idxs(DstNumElts); 2391 for (unsigned i = 0; i != DstNumElts; ++i) { 2392 Idxs[i] = i + (Imm * DstNumElts); 2393 } 2394 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2395 2396 // If the intrinsic has a mask operand, handle that. 2397 if (CI->getNumArgOperands() == 4) 2398 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2399 CI->getArgOperand(2)); 2400 } else if (!IsX86 && Name == "stackprotectorcheck") { 2401 Rep = nullptr; 2402 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") || 2403 Name.startswith("avx512.mask.perm.di."))) { 2404 Value *Op0 = CI->getArgOperand(0); 2405 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2406 VectorType *VecTy = cast<VectorType>(CI->getType()); 2407 unsigned NumElts = VecTy->getNumElements(); 2408 2409 SmallVector<uint32_t, 8> Idxs(NumElts); 2410 for (unsigned i = 0; i != NumElts; ++i) 2411 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3); 2412 2413 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2414 2415 if (CI->getNumArgOperands() == 4) 2416 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2417 CI->getArgOperand(2)); 2418 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") || 2419 Name == "avx2.vperm2i128")) { 2420 // The immediate permute control byte looks like this: 2421 // [1:0] - select 128 bits from sources for low half of destination 2422 // [2] - ignore 2423 // [3] - zero low half of destination 2424 // [5:4] - select 128 bits from sources for high half of destination 2425 // [6] - ignore 2426 // [7] - zero high half of destination 2427 2428 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2429 2430 unsigned NumElts = CI->getType()->getVectorNumElements(); 2431 unsigned HalfSize = NumElts / 2; 2432 SmallVector<uint32_t, 8> ShuffleMask(NumElts); 2433 2434 // Determine which operand(s) are actually in use for this instruction. 2435 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2436 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2437 2438 // If needed, replace operands based on zero mask. 2439 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0; 2440 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1; 2441 2442 // Permute low half of result. 2443 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0; 2444 for (unsigned i = 0; i < HalfSize; ++i) 2445 ShuffleMask[i] = StartIndex + i; 2446 2447 // Permute high half of result. 2448 StartIndex = (Imm & 0x10) ? HalfSize : 0; 2449 for (unsigned i = 0; i < HalfSize; ++i) 2450 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i; 2451 2452 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2453 2454 } else if (IsX86 && (Name.startswith("avx.vpermil.") || 2455 Name == "sse2.pshuf.d" || 2456 Name.startswith("avx512.mask.vpermil.p") || 2457 Name.startswith("avx512.mask.pshuf.d."))) { 2458 Value *Op0 = CI->getArgOperand(0); 2459 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2460 VectorType *VecTy = cast<VectorType>(CI->getType()); 2461 unsigned NumElts = VecTy->getNumElements(); 2462 // Calculate the size of each index in the immediate. 2463 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits(); 2464 unsigned IdxMask = ((1 << IdxSize) - 1); 2465 2466 SmallVector<uint32_t, 8> Idxs(NumElts); 2467 // Lookup the bits for this element, wrapping around the immediate every 2468 // 8-bits. 
Elements are grouped into sets of 2 or 4 elements so we need 2469 // to offset by the first index of each group. 2470 for (unsigned i = 0; i != NumElts; ++i) 2471 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask); 2472 2473 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2474 2475 if (CI->getNumArgOperands() == 4) 2476 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2477 CI->getArgOperand(2)); 2478 } else if (IsX86 && (Name == "sse2.pshufl.w" || 2479 Name.startswith("avx512.mask.pshufl.w."))) { 2480 Value *Op0 = CI->getArgOperand(0); 2481 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2482 unsigned NumElts = CI->getType()->getVectorNumElements(); 2483 2484 SmallVector<uint32_t, 16> Idxs(NumElts); 2485 for (unsigned l = 0; l != NumElts; l += 8) { 2486 for (unsigned i = 0; i != 4; ++i) 2487 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l; 2488 for (unsigned i = 4; i != 8; ++i) 2489 Idxs[i + l] = i + l; 2490 } 2491 2492 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2493 2494 if (CI->getNumArgOperands() == 4) 2495 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2496 CI->getArgOperand(2)); 2497 } else if (IsX86 && (Name == "sse2.pshufh.w" || 2498 Name.startswith("avx512.mask.pshufh.w."))) { 2499 Value *Op0 = CI->getArgOperand(0); 2500 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2501 unsigned NumElts = CI->getType()->getVectorNumElements(); 2502 2503 SmallVector<uint32_t, 16> Idxs(NumElts); 2504 for (unsigned l = 0; l != NumElts; l += 8) { 2505 for (unsigned i = 0; i != 4; ++i) 2506 Idxs[i + l] = i + l; 2507 for (unsigned i = 0; i != 4; ++i) 2508 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l; 2509 } 2510 2511 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2512 2513 if (CI->getNumArgOperands() == 4) 2514 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2515 CI->getArgOperand(2)); 2516 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) { 2517 Value *Op0 = CI->getArgOperand(0); 2518 Value *Op1 = CI->getArgOperand(1); 2519 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2520 unsigned NumElts = CI->getType()->getVectorNumElements(); 2521 2522 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2523 unsigned HalfLaneElts = NumLaneElts / 2; 2524 2525 SmallVector<uint32_t, 16> Idxs(NumElts); 2526 for (unsigned i = 0; i != NumElts; ++i) { 2527 // Base index is the starting element of the lane. 2528 Idxs[i] = i - (i % NumLaneElts); 2529 // If we are half way through the lane switch to the other source. 2530 if ((i % NumLaneElts) >= HalfLaneElts) 2531 Idxs[i] += NumElts; 2532 // Now select the specific element. By adding HalfLaneElts bits from 2533 // the immediate. Wrapping around the immediate every 8-bits. 
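// e.g. a shufps of two <4 x float> with Imm = 0x1b produces the mask
// <i32 3, i32 2, i32 5, i32 4>.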
2534 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1); 2535 } 2536 2537 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2538 2539 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2540 CI->getArgOperand(3)); 2541 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") || 2542 Name.startswith("avx512.mask.movshdup") || 2543 Name.startswith("avx512.mask.movsldup"))) { 2544 Value *Op0 = CI->getArgOperand(0); 2545 unsigned NumElts = CI->getType()->getVectorNumElements(); 2546 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2547 2548 unsigned Offset = 0; 2549 if (Name.startswith("avx512.mask.movshdup.")) 2550 Offset = 1; 2551 2552 SmallVector<uint32_t, 16> Idxs(NumElts); 2553 for (unsigned l = 0; l != NumElts; l += NumLaneElts) 2554 for (unsigned i = 0; i != NumLaneElts; i += 2) { 2555 Idxs[i + l + 0] = i + l + Offset; 2556 Idxs[i + l + 1] = i + l + Offset; 2557 } 2558 2559 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2560 2561 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2562 CI->getArgOperand(1)); 2563 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") || 2564 Name.startswith("avx512.mask.unpckl."))) { 2565 Value *Op0 = CI->getArgOperand(0); 2566 Value *Op1 = CI->getArgOperand(1); 2567 int NumElts = CI->getType()->getVectorNumElements(); 2568 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2569 2570 SmallVector<uint32_t, 64> Idxs(NumElts); 2571 for (int l = 0; l != NumElts; l += NumLaneElts) 2572 for (int i = 0; i != NumLaneElts; ++i) 2573 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2); 2574 2575 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2576 2577 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2578 CI->getArgOperand(2)); 2579 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") || 2580 Name.startswith("avx512.mask.unpckh."))) { 2581 Value *Op0 = CI->getArgOperand(0); 2582 Value *Op1 = CI->getArgOperand(1); 2583 int NumElts = CI->getType()->getVectorNumElements(); 2584 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2585 2586 SmallVector<uint32_t, 64> Idxs(NumElts); 2587 for (int l = 0; l != NumElts; l += NumLaneElts) 2588 for (int i = 0; i != NumLaneElts; ++i) 2589 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2); 2590 2591 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2592 2593 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2594 CI->getArgOperand(2)); 2595 } else if (IsX86 && (Name.startswith("avx512.mask.and.") || 2596 Name.startswith("avx512.mask.pand."))) { 2597 VectorType *FTy = cast<VectorType>(CI->getType()); 2598 VectorType *ITy = VectorType::getInteger(FTy); 2599 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2600 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2601 Rep = Builder.CreateBitCast(Rep, FTy); 2602 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2603 CI->getArgOperand(2)); 2604 } else if (IsX86 && (Name.startswith("avx512.mask.andn.") || 2605 Name.startswith("avx512.mask.pandn."))) { 2606 VectorType *FTy = cast<VectorType>(CI->getType()); 2607 VectorType *ITy = VectorType::getInteger(FTy); 2608 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy)); 2609 Rep = Builder.CreateAnd(Rep, 2610 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2611 Rep = Builder.CreateBitCast(Rep, FTy); 2612 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2613 CI->getArgOperand(2)); 2614 } else if (IsX86 && (Name.startswith("avx512.mask.or.") || 2615 
Name.startswith("avx512.mask.por."))) { 2616 VectorType *FTy = cast<VectorType>(CI->getType()); 2617 VectorType *ITy = VectorType::getInteger(FTy); 2618 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2619 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2620 Rep = Builder.CreateBitCast(Rep, FTy); 2621 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2622 CI->getArgOperand(2)); 2623 } else if (IsX86 && (Name.startswith("avx512.mask.xor.") || 2624 Name.startswith("avx512.mask.pxor."))) { 2625 VectorType *FTy = cast<VectorType>(CI->getType()); 2626 VectorType *ITy = VectorType::getInteger(FTy); 2627 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2628 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2629 Rep = Builder.CreateBitCast(Rep, FTy); 2630 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2631 CI->getArgOperand(2)); 2632 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) { 2633 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2634 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2635 CI->getArgOperand(2)); 2636 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) { 2637 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2638 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2639 CI->getArgOperand(2)); 2640 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) { 2641 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2642 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2643 CI->getArgOperand(2)); 2644 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) { 2645 if (Name.endswith(".512")) { 2646 Intrinsic::ID IID; 2647 if (Name[17] == 's') 2648 IID = Intrinsic::x86_avx512_add_ps_512; 2649 else 2650 IID = Intrinsic::x86_avx512_add_pd_512; 2651 2652 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2653 { CI->getArgOperand(0), CI->getArgOperand(1), 2654 CI->getArgOperand(4) }); 2655 } else { 2656 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2657 } 2658 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2659 CI->getArgOperand(2)); 2660 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) { 2661 if (Name.endswith(".512")) { 2662 Intrinsic::ID IID; 2663 if (Name[17] == 's') 2664 IID = Intrinsic::x86_avx512_div_ps_512; 2665 else 2666 IID = Intrinsic::x86_avx512_div_pd_512; 2667 2668 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2669 { CI->getArgOperand(0), CI->getArgOperand(1), 2670 CI->getArgOperand(4) }); 2671 } else { 2672 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1)); 2673 } 2674 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2675 CI->getArgOperand(2)); 2676 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) { 2677 if (Name.endswith(".512")) { 2678 Intrinsic::ID IID; 2679 if (Name[17] == 's') 2680 IID = Intrinsic::x86_avx512_mul_ps_512; 2681 else 2682 IID = Intrinsic::x86_avx512_mul_pd_512; 2683 2684 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2685 { CI->getArgOperand(0), CI->getArgOperand(1), 2686 CI->getArgOperand(4) }); 2687 } else { 2688 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2689 } 2690 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2691 CI->getArgOperand(2)); 2692 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) { 2693 if (Name.endswith(".512")) { 2694 Intrinsic::ID IID; 2695 if (Name[17] == 's') 2696 IID = 
Intrinsic::x86_avx512_sub_ps_512; 2697 else 2698 IID = Intrinsic::x86_avx512_sub_pd_512; 2699 2700 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2701 { CI->getArgOperand(0), CI->getArgOperand(1), 2702 CI->getArgOperand(4) }); 2703 } else { 2704 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2705 } 2706 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2707 CI->getArgOperand(2)); 2708 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") || 2709 Name.startswith("avx512.mask.min.p")) && 2710 Name.drop_front(18) == ".512") { 2711 bool IsDouble = Name[17] == 'd'; 2712 bool IsMin = Name[13] == 'i'; 2713 static const Intrinsic::ID MinMaxTbl[2][2] = { 2714 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 }, 2715 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 } 2716 }; 2717 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble]; 2718 2719 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2720 { CI->getArgOperand(0), CI->getArgOperand(1), 2721 CI->getArgOperand(4) }); 2722 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2723 CI->getArgOperand(2)); 2724 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) { 2725 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 2726 Intrinsic::ctlz, 2727 CI->getType()), 2728 { CI->getArgOperand(0), Builder.getInt1(false) }); 2729 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2730 CI->getArgOperand(1)); 2731 } else if (IsX86 && Name.startswith("avx512.mask.psll")) { 2732 bool IsImmediate = Name[16] == 'i' || 2733 (Name.size() > 18 && Name[18] == 'i'); 2734 bool IsVariable = Name[16] == 'v'; 2735 char Size = Name[16] == '.' ? Name[17] : 2736 Name[17] == '.' ? Name[18] : 2737 Name[18] == '.' ? Name[19] : 2738 Name[20]; 2739 2740 Intrinsic::ID IID; 2741 if (IsVariable && Name[17] != '.') { 2742 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di 2743 IID = Intrinsic::x86_avx2_psllv_q; 2744 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di 2745 IID = Intrinsic::x86_avx2_psllv_q_256; 2746 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si 2747 IID = Intrinsic::x86_avx2_psllv_d; 2748 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si 2749 IID = Intrinsic::x86_avx2_psllv_d_256; 2750 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi 2751 IID = Intrinsic::x86_avx512_psllv_w_128; 2752 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi 2753 IID = Intrinsic::x86_avx512_psllv_w_256; 2754 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi 2755 IID = Intrinsic::x86_avx512_psllv_w_512; 2756 else 2757 llvm_unreachable("Unexpected size"); 2758 } else if (Name.endswith(".128")) { 2759 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128 2760 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d 2761 : Intrinsic::x86_sse2_psll_d; 2762 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128 2763 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q 2764 : Intrinsic::x86_sse2_psll_q; 2765 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128 2766 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w 2767 : Intrinsic::x86_sse2_psll_w; 2768 else 2769 llvm_unreachable("Unexpected size"); 2770 } else if (Name.endswith(".256")) { 2771 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256 2772 IID = IsImmediate ? 
    bool IsImmediate = Name[16] == 'i' ||
                       (Name.size() > 18 && Name[18] == 'i');
    bool IsVariable = Name[16] == 'v';
    char Size = Name[16] == '.' ? Name[17] :
                Name[17] == '.' ? Name[18] :
                Name[18] == '.' ? Name[19] :
                                  Name[20];

    Intrinsic::ID IID;
    if (IsVariable && Name[17] != '.') {
      if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di
        IID = Intrinsic::x86_avx2_psllv_q;
      else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di
        IID = Intrinsic::x86_avx2_psllv_q_256;
      else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si
        IID = Intrinsic::x86_avx2_psllv_d;
      else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si
        IID = Intrinsic::x86_avx2_psllv_d_256;
      else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi
        IID = Intrinsic::x86_avx512_psllv_w_128;
      else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi
        IID = Intrinsic::x86_avx512_psllv_w_256;
      else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi
        IID = Intrinsic::x86_avx512_psllv_w_512;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".128")) {
      if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128
        IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d
                          : Intrinsic::x86_sse2_psll_d;
      else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q
                          : Intrinsic::x86_sse2_psll_q;
      else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w
                          : Intrinsic::x86_sse2_psll_w;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".256")) {
      if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256
        IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d
                          : Intrinsic::x86_avx2_psll_d;
      else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q
                          : Intrinsic::x86_avx2_psll_q;
      else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w
                          : Intrinsic::x86_avx2_psll_w;
      else
        llvm_unreachable("Unexpected size");
    } else {
      if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512
        IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 :
              IsVariable  ? Intrinsic::x86_avx512_psllv_d_512 :
                            Intrinsic::x86_avx512_psll_d_512;
      else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512
        IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 :
              IsVariable  ? Intrinsic::x86_avx512_psllv_q_512 :
                            Intrinsic::x86_avx512_psll_q_512;
      else if (Size == 'w') // psll.wi.512, pslli.w, psll.w
        IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512
                          : Intrinsic::x86_avx512_psll_w_512;
      else
        llvm_unreachable("Unexpected size");
    }

    Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
  } else if (IsX86 && Name.startswith("avx512.mask.psrl")) {
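    // Logical right shifts are decoded exactly like the psll family above;
    // only the target intrinsics differ (psrl/psrli/psrlv variants).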
    bool IsImmediate = Name[16] == 'i' ||
                       (Name.size() > 18 && Name[18] == 'i');
    bool IsVariable = Name[16] == 'v';
    char Size = Name[16] == '.' ? Name[17] :
                Name[17] == '.' ? Name[18] :
                Name[18] == '.' ? Name[19] :
                                  Name[20];

    Intrinsic::ID IID;
    if (IsVariable && Name[17] != '.') {
      if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
        IID = Intrinsic::x86_avx2_psrlv_q;
      else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
        IID = Intrinsic::x86_avx2_psrlv_q_256;
      else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
        IID = Intrinsic::x86_avx2_psrlv_d;
      else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
        IID = Intrinsic::x86_avx2_psrlv_d_256;
      else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
        IID = Intrinsic::x86_avx512_psrlv_w_128;
      else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
        IID = Intrinsic::x86_avx512_psrlv_w_256;
      else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
        IID = Intrinsic::x86_avx512_psrlv_w_512;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".128")) {
      if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
                          : Intrinsic::x86_sse2_psrl_d;
      else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
                          : Intrinsic::x86_sse2_psrl_q;
      else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
                          : Intrinsic::x86_sse2_psrl_w;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".256")) {
      if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
                          : Intrinsic::x86_avx2_psrl_d;
      else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
                          : Intrinsic::x86_avx2_psrl_q;
      else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
                          : Intrinsic::x86_avx2_psrl_w;
      else
        llvm_unreachable("Unexpected size");
    } else {
      if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
        IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
              IsVariable  ? Intrinsic::x86_avx512_psrlv_d_512 :
                            Intrinsic::x86_avx512_psrl_d_512;
      else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
        IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
              IsVariable  ? Intrinsic::x86_avx512_psrlv_q_512 :
                            Intrinsic::x86_avx512_psrl_q_512;
      else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
        IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
                          : Intrinsic::x86_avx512_psrl_w_512;
      else
        llvm_unreachable("Unexpected size");
    }

    Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
  } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
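    // Arithmetic right shifts use the same decoding, but the 64-bit element
    // forms have no SSE2/AVX2 equivalents, so even the 128- and 256-bit 'q'
    // cases map onto AVX512 psra.q intrinsics below.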
    bool IsImmediate = Name[16] == 'i' ||
                       (Name.size() > 18 && Name[18] == 'i');
    bool IsVariable = Name[16] == 'v';
    char Size = Name[16] == '.' ? Name[17] :
                Name[17] == '.' ? Name[18] :
                Name[18] == '.' ? Name[19] :
                                  Name[20];

    Intrinsic::ID IID;
    if (IsVariable && Name[17] != '.') {
      if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si
        IID = Intrinsic::x86_avx2_psrav_d;
      else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si
        IID = Intrinsic::x86_avx2_psrav_d_256;
      else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi
        IID = Intrinsic::x86_avx512_psrav_w_128;
      else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi
        IID = Intrinsic::x86_avx512_psrav_w_256;
      else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi
        IID = Intrinsic::x86_avx512_psrav_w_512;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".128")) {
      if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d
                          : Intrinsic::x86_sse2_psra_d;
      else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 :
              IsVariable  ? Intrinsic::x86_avx512_psrav_q_128 :
                            Intrinsic::x86_avx512_psra_q_128;
      else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128
        IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w
                          : Intrinsic::x86_sse2_psra_w;
      else
        llvm_unreachable("Unexpected size");
    } else if (Name.endswith(".256")) {
      if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d
                          : Intrinsic::x86_avx2_psra_d;
      else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 :
              IsVariable  ? Intrinsic::x86_avx512_psrav_q_256 :
                            Intrinsic::x86_avx512_psra_q_256;
      else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256
        IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w
                          : Intrinsic::x86_avx2_psra_w;
      else
        llvm_unreachable("Unexpected size");
    } else {
      if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 :
              IsVariable  ? Intrinsic::x86_avx512_psrav_d_512 :
                            Intrinsic::x86_avx512_psra_d_512;
      else if (Size == 'q') // psra.qi.512, psrai.q, psra.q
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 :
              IsVariable  ? Intrinsic::x86_avx512_psrav_q_512 :
                            Intrinsic::x86_avx512_psra_q_512;
      else if (Size == 'w') // psra.wi.512, psrai.w, psra.w
        IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512
                          : Intrinsic::x86_avx512_psra_w_512;
      else
        llvm_unreachable("Unexpected size");
    }

    Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
  } else if (IsX86 && Name.startswith("avx512.mask.move.s")) {
    Rep = upgradeMaskedMove(Builder, *CI);
  } else if (IsX86 && Name.startswith("avx512.cvtmask2")) {
    Rep = UpgradeMaskToInt(Builder, *CI);
  } else if (IsX86 && Name.endswith(".movntdqa")) {
    Module *M = F->getParent();
    MDNode *Node = MDNode::get(
        C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1)));

    Value *Ptr = CI->getArgOperand(0);
    VectorType *VTy = cast<VectorType>(CI->getType());

    // Convert the type of the pointer to a pointer to the stored type.
    Value *BC =
        Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast");
    LoadInst *LI = Builder.CreateAlignedLoad(VTy, BC, VTy->getBitWidth() / 8);
    LI->setMetadata(M->getMDKindID("nontemporal"), Node);
    Rep = LI;
  } else if (IsX86 &&
             (Name.startswith("sse2.pavg") || Name.startswith("avx2.pavg") ||
              Name.startswith("avx512.mask.pavg"))) {
    // llvm.x86.sse2.pavg.b/w, llvm.x86.avx2.pavg.b/w,
    // llvm.x86.avx512.mask.pavg.b/w
    Value *A = CI->getArgOperand(0);
    Value *B = CI->getArgOperand(1);
    VectorType *ZextType = VectorType::getExtendedElementVectorType(
        cast<VectorType>(A->getType()));
    Value *ExtendedA = Builder.CreateZExt(A, ZextType);
    Value *ExtendedB = Builder.CreateZExt(B, ZextType);
    Value *Sum = Builder.CreateAdd(ExtendedA, ExtendedB);
    Value *AddOne = Builder.CreateAdd(Sum, ConstantInt::get(ZextType, 1));
    Value *ShiftR = Builder.CreateLShr(AddOne, ConstantInt::get(ZextType, 1));
    Rep = Builder.CreateTrunc(ShiftR, A->getType());
    if (CI->getNumArgOperands() > 2) {
      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
    }
  } else if (IsX86 && (Name.startswith("fma.vfmadd.") ||
                       Name.startswith("fma.vfmsub.") ||
                       Name.startswith("fma.vfnmadd.") ||
                       Name.startswith("fma.vfnmsub."))) {
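    // These FMA3 intrinsics become calls to the generic llvm.fma intrinsic;
    // the negated-multiply and negated-accumulator forms negate the relevant
    // operand first (CreateFNeg), and scalar forms operate on element 0 via
    // extractelement/insertelement. Illustrative IR for a packed vfmadd
    // (operand names assumed):
    //   %res = call <4 x float> @llvm.fma.v4f32(<4 x float> %a,
    //                                           <4 x float> %b,
    //                                           <4 x float> %c)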
    bool NegMul = Name[6] == 'n';
    bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's';
    bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's';

    Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                     CI->getArgOperand(2) };

    if (IsScalar) {
      Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
      Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
      Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);
    }

    if (NegMul && !IsScalar)
      Ops[0] = Builder.CreateFNeg(Ops[0]);
    if (NegMul && IsScalar)
      Ops[1] = Builder.CreateFNeg(Ops[1]);
    if (NegAcc)
      Ops[2] = Builder.CreateFNeg(Ops[2]);

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
                                                       Intrinsic::fma,
                                                       Ops[0]->getType()),
                             Ops);

    if (IsScalar)
      Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep,
                                        (uint64_t)0);
  } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) {
    Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                     CI->getArgOperand(2) };

    Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
    Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
    Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0);

    Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
                                                       Intrinsic::fma,
                                                       Ops[0]->getType()),
                             Ops);

    Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()),
                                      Rep, (uint64_t)0);
  } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") ||
                       Name.startswith("avx512.maskz.vfmadd.s") ||
                       Name.startswith("avx512.mask3.vfmadd.s") ||
                       Name.startswith("avx512.mask3.vfmsub.s") ||
                       Name.startswith("avx512.mask3.vfnmsub.s"))) {
    bool IsMask3 = Name[11] == '3';
    bool IsMaskZ = Name[11] == 'z';
    // Drop the "avx512.mask." to make it easier.
    Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
    bool NegMul = Name[2] == 'n';
    bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's';

    Value *A = CI->getArgOperand(0);
    Value *B = CI->getArgOperand(1);
    Value *C = CI->getArgOperand(2);

    if (NegMul && (IsMask3 || IsMaskZ))
      A = Builder.CreateFNeg(A);
    if (NegMul && !(IsMask3 || IsMaskZ))
      B = Builder.CreateFNeg(B);
    if (NegAcc)
      C = Builder.CreateFNeg(C);

    A = Builder.CreateExtractElement(A, (uint64_t)0);
    B = Builder.CreateExtractElement(B, (uint64_t)0);
    C = Builder.CreateExtractElement(C, (uint64_t)0);

    if (!isa<ConstantInt>(CI->getArgOperand(4)) ||
        cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) {
      Value *Ops[] = { A, B, C, CI->getArgOperand(4) };

      Intrinsic::ID IID;
      if (Name.back() == 'd')
        IID = Intrinsic::x86_avx512_vfmadd_f64;
      else
        IID = Intrinsic::x86_avx512_vfmadd_f32;
      Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID);
      Rep = Builder.CreateCall(FMA, Ops);
    } else {
      Function *FMA = Intrinsic::getDeclaration(CI->getModule(),
                                                Intrinsic::fma,
                                                A->getType());
      Rep = Builder.CreateCall(FMA, { A, B, C });
    }
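    // The pass-through lane for the masked-off case depends on the flavor:
    // maskz variants blend with zero, mask3 variants keep the accumulator
    // operand, and plain mask variants keep the first source operand.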
3067 if (NegAcc && IsMask3) 3068 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2), 3069 (uint64_t)0); 3070 3071 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3), 3072 Rep, PassThru); 3073 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0), 3074 Rep, (uint64_t)0); 3075 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") || 3076 Name.startswith("avx512.mask.vfnmadd.p") || 3077 Name.startswith("avx512.mask.vfnmsub.p") || 3078 Name.startswith("avx512.mask3.vfmadd.p") || 3079 Name.startswith("avx512.mask3.vfmsub.p") || 3080 Name.startswith("avx512.mask3.vfnmsub.p") || 3081 Name.startswith("avx512.maskz.vfmadd.p"))) { 3082 bool IsMask3 = Name[11] == '3'; 3083 bool IsMaskZ = Name[11] == 'z'; 3084 // Drop the "avx512.mask." to make it easier. 3085 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 3086 bool NegMul = Name[2] == 'n'; 3087 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's'; 3088 3089 Value *A = CI->getArgOperand(0); 3090 Value *B = CI->getArgOperand(1); 3091 Value *C = CI->getArgOperand(2); 3092 3093 if (NegMul && (IsMask3 || IsMaskZ)) 3094 A = Builder.CreateFNeg(A); 3095 if (NegMul && !(IsMask3 || IsMaskZ)) 3096 B = Builder.CreateFNeg(B); 3097 if (NegAcc) 3098 C = Builder.CreateFNeg(C); 3099 3100 if (CI->getNumArgOperands() == 5 && 3101 (!isa<ConstantInt>(CI->getArgOperand(4)) || 3102 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) { 3103 Intrinsic::ID IID; 3104 // Check the character before ".512" in string. 3105 if (Name[Name.size()-5] == 's') 3106 IID = Intrinsic::x86_avx512_vfmadd_ps_512; 3107 else 3108 IID = Intrinsic::x86_avx512_vfmadd_pd_512; 3109 3110 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 3111 { A, B, C, CI->getArgOperand(4) }); 3112 } else { 3113 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 3114 Intrinsic::fma, 3115 A->getType()); 3116 Rep = Builder.CreateCall(FMA, { A, B, C }); 3117 } 3118 3119 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) : 3120 IsMask3 ? CI->getArgOperand(2) : 3121 CI->getArgOperand(0); 3122 3123 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3124 } else if (IsX86 && (Name.startswith("fma.vfmaddsub.p") || 3125 Name.startswith("fma.vfmsubadd.p"))) { 3126 bool IsSubAdd = Name[7] == 's'; 3127 int NumElts = CI->getType()->getVectorNumElements(); 3128 3129 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3130 CI->getArgOperand(2) }; 3131 3132 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma, 3133 Ops[0]->getType()); 3134 Value *Odd = Builder.CreateCall(FMA, Ops); 3135 Ops[2] = Builder.CreateFNeg(Ops[2]); 3136 Value *Even = Builder.CreateCall(FMA, Ops); 3137 3138 if (IsSubAdd) 3139 std::swap(Even, Odd); 3140 3141 SmallVector<uint32_t, 32> Idxs(NumElts); 3142 for (int i = 0; i != NumElts; ++i) 3143 Idxs[i] = i + (i % 2) * NumElts; 3144 3145 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs); 3146 } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") || 3147 Name.startswith("avx512.mask3.vfmaddsub.p") || 3148 Name.startswith("avx512.maskz.vfmaddsub.p") || 3149 Name.startswith("avx512.mask3.vfmsubadd.p"))) { 3150 bool IsMask3 = Name[11] == '3'; 3151 bool IsMaskZ = Name[11] == 'z'; 3152 // Drop the "avx512.mask." to make it easier. 3153 Name = Name.drop_front(IsMask3 || IsMaskZ ? 
    bool IsMask3 = Name[11] == '3';
    bool IsMaskZ = Name[11] == 'z';
    // Drop the "avx512.mask." to make it easier.
    Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
    bool IsSubAdd = Name[3] == 's';
    if (CI->getNumArgOperands() == 5 &&
        (!isa<ConstantInt>(CI->getArgOperand(4)) ||
         cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
      Intrinsic::ID IID;
      // Check the character before ".512" in string.
      if (Name[Name.size()-5] == 's')
        IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
      else
        IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;

      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2), CI->getArgOperand(4) };
      if (IsSubAdd)
        Ops[2] = Builder.CreateFNeg(Ops[2]);

      // Pass the (possibly negated) operands; re-reading the raw call
      // operands here would silently drop the fneg for the subadd form.
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                               Ops);
    } else {
      int NumElts = CI->getType()->getVectorNumElements();

      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2) };

      Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
                                                Ops[0]->getType());
      Value *Odd = Builder.CreateCall(FMA, Ops);
      Ops[2] = Builder.CreateFNeg(Ops[2]);
      Value *Even = Builder.CreateCall(FMA, Ops);

      if (IsSubAdd)
        std::swap(Even, Odd);

      SmallVector<uint32_t, 32> Idxs(NumElts);
      for (int i = 0; i != NumElts; ++i)
        Idxs[i] = i + (i % 2) * NumElts;

      Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
    }

    Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
                      IsMask3 ? CI->getArgOperand(2) :
                                CI->getArgOperand(0);

    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
  } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
                       Name.startswith("avx512.maskz.pternlog."))) {
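    // pternlog keeps its imm8 truth-table operand and is only re-dispatched
    // onto the unmasked intrinsic for the right vector/element width; the
    // masking itself becomes an IR select against operand 0 (or zero for the
    // maskz form) below.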
    bool ZeroMask = Name[11] == 'z';
    unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
    unsigned EltWidth = CI->getType()->getScalarSizeInBits();
    Intrinsic::ID IID;
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pternlog_d_128;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pternlog_d_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pternlog_d_512;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pternlog_q_128;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pternlog_q_256;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pternlog_q_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                      CI->getArgOperand(2), CI->getArgOperand(3) };
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                             Args);
    Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                               : CI->getArgOperand(0);
    Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru);
  } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") ||
                       Name.startswith("avx512.maskz.vpmadd52"))) {
    bool ZeroMask = Name[11] == 'z';
    bool High = Name[20] == 'h' || Name[21] == 'h';
    unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
    Intrinsic::ID IID;
    if (VecWidth == 128 && !High)
      IID = Intrinsic::x86_avx512_vpmadd52l_uq_128;
    else if (VecWidth == 256 && !High)
      IID = Intrinsic::x86_avx512_vpmadd52l_uq_256;
    else if (VecWidth == 512 && !High)
      IID = Intrinsic::x86_avx512_vpmadd52l_uq_512;
    else if (VecWidth == 128 && High)
      IID = Intrinsic::x86_avx512_vpmadd52h_uq_128;
    else if (VecWidth == 256 && High)
      IID = Intrinsic::x86_avx512_vpmadd52h_uq_256;
    else if (VecWidth == 512 && High)
      IID = Intrinsic::x86_avx512_vpmadd52h_uq_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                      CI->getArgOperand(2) };
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                             Args);
    Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                               : CI->getArgOperand(0);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
  } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") ||
                       Name.startswith("avx512.mask.vpermt2var.") ||
                       Name.startswith("avx512.maskz.vpermt2var."))) {
    bool ZeroMask = Name[11] == 'z';
    bool IndexForm = Name[17] == 'i';
    Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm);
  } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") ||
                       Name.startswith("avx512.maskz.vpdpbusd.") ||
                       Name.startswith("avx512.mask.vpdpbusds.") ||
                       Name.startswith("avx512.maskz.vpdpbusds."))) {
    bool ZeroMask = Name[11] == 'z';
    bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
    unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
    Intrinsic::ID IID;
    if (VecWidth == 128 && !IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpbusd_128;
    else if (VecWidth == 256 && !IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpbusd_256;
    else if (VecWidth == 512 && !IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpbusd_512;
    else if (VecWidth == 128 && IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpbusds_128;
    else if (VecWidth == 256 && IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpbusds_256;
    else if (VecWidth == 512 && IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpbusds_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                      CI->getArgOperand(2) };
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                             Args);
    Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                               : CI->getArgOperand(0);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
  } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") ||
                       Name.startswith("avx512.maskz.vpdpwssd.") ||
                       Name.startswith("avx512.mask.vpdpwssds.") ||
                       Name.startswith("avx512.maskz.vpdpwssds."))) {
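    // Same decoding as vpdpbusd above: a trailing 's' before the width
    // suffix selects the saturating VNNI intrinsic, and the mask becomes a
    // select against operand 0 (or zero for the maskz form).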
    bool ZeroMask = Name[11] == 'z';
    bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's';
    unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
    Intrinsic::ID IID;
    if (VecWidth == 128 && !IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpwssd_128;
    else if (VecWidth == 256 && !IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpwssd_256;
    else if (VecWidth == 512 && !IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpwssd_512;
    else if (VecWidth == 128 && IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpwssds_128;
    else if (VecWidth == 256 && IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpwssds_256;
    else if (VecWidth == 512 && IsSaturating)
      IID = Intrinsic::x86_avx512_vpdpwssds_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                      CI->getArgOperand(2) };
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                             Args);
    Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType())
                               : CI->getArgOperand(0);
    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
  } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" ||
                       Name == "addcarry.u32" || Name == "addcarry.u64" ||
                       Name == "subborrow.u32" || Name == "subborrow.u64")) {
    Intrinsic::ID IID;
    if (Name[0] == 'a' && Name.back() == '2')
      IID = Intrinsic::x86_addcarry_32;
    else if (Name[0] == 'a' && Name.back() == '4')
      IID = Intrinsic::x86_addcarry_64;
    else if (Name[0] == 's' && Name.back() == '2')
      IID = Intrinsic::x86_subborrow_32;
    else if (Name[0] == 's' && Name.back() == '4')
      IID = Intrinsic::x86_subborrow_64;
    else
      llvm_unreachable("Unexpected intrinsic");

    // Make a call with 3 operands.
    Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                      CI->getArgOperand(2) };
    Value *NewCall = Builder.CreateCall(
        Intrinsic::getDeclaration(CI->getModule(), IID), Args);

    // Extract the second result and store it.
    Value *Data = Builder.CreateExtractValue(NewCall, 1);
    // Cast the pointer to the right type.
    Value *Ptr = Builder.CreateBitCast(
        CI->getArgOperand(3), llvm::PointerType::getUnqual(Data->getType()));
    Builder.CreateAlignedStore(Data, Ptr, 1);
    // Replace the original call result with the first result of the new call.
    Value *CF = Builder.CreateExtractValue(NewCall, 0);

    CI->replaceAllUsesWith(CF);
    Rep = nullptr;
  } else if (IsX86 && Name.startswith("avx512.mask.") &&
             upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) {
    // Rep will be updated by the call in the condition.
  } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) {
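    // NVVM abs has no direct IR equivalent here, so it is expanded to a
    // compare-and-select. Illustrative IR (operand name assumed):
    //   %neg      = sub i32 0, %a
    //   %abs.cond = icmp sge i32 %a, 0
    //   %abs      = select i1 %abs.cond, i32 %a, i32 %neg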
    Value *Arg = CI->getArgOperand(0);
    Value *Neg = Builder.CreateNeg(Arg, "neg");
    Value *Cmp = Builder.CreateICmpSGE(
        Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond");
    Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs");
  } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" ||
                        Name == "max.ui" || Name == "max.ull")) {
    Value *Arg0 = CI->getArgOperand(0);
    Value *Arg1 = CI->getArgOperand(1);
    Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
                     ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
                     : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
    Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
  } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
                        Name == "min.ui" || Name == "min.ull")) {
    Value *Arg0 = CI->getArgOperand(0);
    Value *Arg1 = CI->getArgOperand(1);
    Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
                     ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
                     : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
    Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
  } else if (IsNVVM && Name == "clz.ll") {
    // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
    Value *Arg = CI->getArgOperand(0);
    Value *Ctlz = Builder.CreateCall(
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                  {Arg->getType()}),
        {Arg, Builder.getFalse()}, "ctlz");
    Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
  } else if (IsNVVM && Name == "popc.ll") {
    // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an i64.
    Value *Arg = CI->getArgOperand(0);
    Value *Popc = Builder.CreateCall(
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                  {Arg->getType()}),
        Arg, "ctpop");
    Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
  } else if (IsNVVM && Name == "h2f") {
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(
                                 F->getParent(), Intrinsic::convert_from_fp16,
                                 {Builder.getFloatTy()}),
                             CI->getArgOperand(0), "h2f");
  } else {
    llvm_unreachable("Unknown function for CallInst upgrade.");
  }

  if (Rep)
    CI->replaceAllUsesWith(Rep);
  CI->eraseFromParent();
  return;
}

const auto &DefaultCase = [&NewFn, &CI]() -> void {
  // Handle generic mangling change, but nothing else
  assert(
      (CI->getCalledFunction()->getName() != NewFn->getName()) &&
      "Unknown function for CallInst upgrade and isn't just a name change");
  CI->setCalledFunction(NewFn);
};
CallInst *NewCall = nullptr;
switch (NewFn->getIntrinsicID()) {
default: {
  DefaultCase();
  return;
}

case Intrinsic::arm_neon_vld1:
case Intrinsic::arm_neon_vld2:
case Intrinsic::arm_neon_vld3:
case Intrinsic::arm_neon_vld4:
case Intrinsic::arm_neon_vld2lane:
case Intrinsic::arm_neon_vld3lane:
case Intrinsic::arm_neon_vld4lane:
case Intrinsic::arm_neon_vst1:
case Intrinsic::arm_neon_vst2:
case Intrinsic::arm_neon_vst3:
case Intrinsic::arm_neon_vst4:
case Intrinsic::arm_neon_vst2lane:
case Intrinsic::arm_neon_vst3lane:
case Intrinsic::arm_neon_vst4lane: {
  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                               CI->arg_operands().end());
  NewCall = Builder.CreateCall(NewFn, Args);
  break;
}

case Intrinsic::bitreverse:
  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
  break;

case Intrinsic::ctlz:
case Intrinsic::cttz:
  assert(CI->getNumArgOperands() == 1 &&
         "Mismatch between function args and call args");
  NewCall =
      Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
  break;

case Intrinsic::objectsize: {
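  // Older bitcode may carry the two- or three-argument form; the missing
  // 'null is unknown size' and 'dynamic' flags default to false. E.g. the
  // old call (illustrative):
  //   call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false)
  // becomes:
  //   call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false, i1 false,
  //                                      i1 false)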
  Value *NullIsUnknownSize = CI->getNumArgOperands() == 2
                                 ? Builder.getFalse()
                                 : CI->getArgOperand(2);
  // Only read the 'dynamic' operand when it actually exists (four-argument
  // form); the old '< 3' check read past the end of a three-argument call.
  Value *Dynamic = CI->getNumArgOperands() < 4 ? Builder.getFalse()
                                               : CI->getArgOperand(3);
  NewCall = Builder.CreateCall(
      NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize,
              Dynamic});
  break;
}

case Intrinsic::ctpop:
  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
  break;

case Intrinsic::convert_from_fp16:
  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
  break;

case Intrinsic::dbg_value:
  // Upgrade from the old version that had an extra offset argument.
  assert(CI->getNumArgOperands() == 4);
  // Drop nonzero offsets instead of attempting to upgrade them.
  if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1)))
    if (Offset->isZeroValue()) {
      NewCall = Builder.CreateCall(
          NewFn,
          {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)});
      break;
    }
  CI->eraseFromParent();
  return;

case Intrinsic::x86_xop_vfrcz_ss:
case Intrinsic::x86_xop_vfrcz_sd:
  NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)});
  break;

case Intrinsic::x86_xop_vpermil2pd:
case Intrinsic::x86_xop_vpermil2ps:
case Intrinsic::x86_xop_vpermil2pd_256:
case Intrinsic::x86_xop_vpermil2ps_256: {
  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                               CI->arg_operands().end());
  VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType());
  VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy);
  Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy);
  NewCall = Builder.CreateCall(NewFn, Args);
  break;
}

case Intrinsic::x86_sse41_ptestc:
case Intrinsic::x86_sse41_ptestz:
case Intrinsic::x86_sse41_ptestnzc: {
  // The arguments for these intrinsics used to be v4f32, and changed
  // to v2i64. This is purely a nop, since those are bitwise intrinsics.
  // So, the only thing required is a bitcast for both arguments.
  // First, check the arguments have the old type.
  Value *Arg0 = CI->getArgOperand(0);
  if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4))
    return;

  // Old intrinsic, add bitcasts
  Value *Arg1 = CI->getArgOperand(1);

  Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2);

  Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast");
  Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast");

  NewCall = Builder.CreateCall(NewFn, {BC0, BC1});
  break;
}

case Intrinsic::x86_rdtscp: {
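  // The old form returned the TSC and wrote the IA32_TSC_AUX value through a
  // pointer operand; the new form returns both results. Illustrative IR for
  // the upgraded sequence (names assumed):
  //   %pair = call { i64, i32 } @llvm.x86.rdtscp()
  //   %aux  = extractvalue { i64, i32 } %pair, 1
  //   store i32 %aux, i32* %ptr, align 1
  //   %tsc  = extractvalue { i64, i32 } %pair, 0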
  // This used to take 1 argument. If we have no arguments, it is already
  // upgraded.
  if (CI->getNumOperands() == 0)
    return;

  NewCall = Builder.CreateCall(NewFn);
  // Extract the second result and store it.
  Value *Data = Builder.CreateExtractValue(NewCall, 1);
  // Cast the pointer to the right type.
  Value *Ptr = Builder.CreateBitCast(
      CI->getArgOperand(0), llvm::PointerType::getUnqual(Data->getType()));
  Builder.CreateAlignedStore(Data, Ptr, 1);
  // Replace the original call result with the first result of the new call.
  Value *TSC = Builder.CreateExtractValue(NewCall, 0);

  std::string Name = CI->getName();
  if (!Name.empty()) {
    CI->setName(Name + ".old");
    NewCall->setName(Name);
  }
  CI->replaceAllUsesWith(TSC);
  CI->eraseFromParent();
  return;
}

case Intrinsic::x86_sse41_insertps:
case Intrinsic::x86_sse41_dppd:
case Intrinsic::x86_sse41_dpps:
case Intrinsic::x86_sse41_mpsadbw:
case Intrinsic::x86_avx_dp_ps_256:
case Intrinsic::x86_avx2_mpsadbw: {
  // Need to truncate the last argument from i32 to i8 -- this argument models
  // an inherently 8-bit immediate operand to these x86 instructions.
  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                               CI->arg_operands().end());

  // Replace the last argument with a trunc.
  Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc");
  NewCall = Builder.CreateCall(NewFn, Args);
  break;
}

case Intrinsic::thread_pointer: {
  NewCall = Builder.CreateCall(NewFn, {});
  break;
}

case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
case Intrinsic::masked_load:
case Intrinsic::masked_store:
case Intrinsic::masked_gather:
case Intrinsic::masked_scatter: {
  SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                               CI->arg_operands().end());
  NewCall = Builder.CreateCall(NewFn, Args);
  break;
}

case Intrinsic::memcpy:
case Intrinsic::memmove:
case Intrinsic::memset: {
  // We have to make sure that the call signature is what we're expecting.
  // We only want to change the old signatures by removing the alignment arg:
  //  @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i32, i1)
  //    -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i1)
  //  @llvm.memset...(i8*, i8, i[32|64], i32, i1)
  //    -> @llvm.memset...(i8*, i8, i[32|64], i1)
  // Note: i8*'s in the above can be any pointer type
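  // For example (illustrative), the old five-operand form:
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n,
  //                                        i32 4, i1 false)
  // becomes a four-operand call with the alignment moved to attributes:
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %d, i8* align 4 %s,
  //                                        i64 %n, i1 false)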
  if (CI->getNumArgOperands() != 5) {
    DefaultCase();
    return;
  }
  // Remove alignment argument (3), and add alignment attributes to the
  // dest/src pointers.
  Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1),
                    CI->getArgOperand(2), CI->getArgOperand(4)};
  NewCall = Builder.CreateCall(NewFn, Args);
  auto *MemCI = cast<MemIntrinsic>(NewCall);
  // All mem intrinsics support dest alignment.
  const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
  MemCI->setDestAlignment(Align->getZExtValue());
  // Memcpy/Memmove also support source alignment.
  if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
    MTI->setSourceAlignment(Align->getZExtValue());
  break;
}
}
assert(NewCall && "Should have either set this variable or returned through "
                  "the default case");
std::string Name = CI->getName();
if (!Name.empty()) {
  CI->setName(Name + ".old");
  NewCall->setName(Name);
}
CI->replaceAllUsesWith(NewCall);
CI->eraseFromParent();
}

void llvm::UpgradeCallsToIntrinsic(Function *F) {
  assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");

  // Check if this function should be upgraded and get the replacement function
  // if there is one.
  Function *NewFn;
  if (UpgradeIntrinsicFunction(F, NewFn)) {
    // Replace all users of the old function with the new function or new
    // instructions. This is not a range loop because the call is deleted.
    for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; )
      if (CallInst *CI = dyn_cast<CallInst>(*UI++))
        UpgradeIntrinsicCall(CI, NewFn);

    // Remove old function, no longer used, from the module.
    F->eraseFromParent();
  }
}

MDNode *llvm::UpgradeTBAANode(MDNode &MD) {
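  // Old scalar TBAA tags were !{!"name", !parent} (optionally with a
  // constness operand); the struct-path form is !{!base, !access, i64 offset}.
  // Illustrative upgrade of a three-operand scalar tag:
  //   !{!"int", !0, i64 1}  -->  !{!1, !1, i64 0, i64 1}
  //   where !1 = !{!"int", !0}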
  // Check if the tag uses struct-path aware TBAA format.
  if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3)
    return &MD;

  auto &Context = MD.getContext();
  if (MD.getNumOperands() == 3) {
    Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)};
    MDNode *ScalarType = MDNode::get(Context, Elts);
    // Create a MDNode <ScalarType, ScalarType, offset 0, const>
    Metadata *Elts2[] = {ScalarType, ScalarType,
                         ConstantAsMetadata::get(
                             Constant::getNullValue(Type::getInt64Ty(Context))),
                         MD.getOperand(2)};
    return MDNode::get(Context, Elts2);
  }
  // Create a MDNode <MD, MD, offset 0>
  Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue(
                                    Type::getInt64Ty(Context)))};
  return MDNode::get(Context, Elts);
}

Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
                                      Instruction *&Temp) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Temp = nullptr;
  Type *SrcTy = V->getType();
  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
    LLVMContext &Context = V->getContext();

    // We have no information about target data layout, so we assume that
    // the maximum pointer size is 64bit.
    Type *MidTy = Type::getInt64Ty(Context);
    Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy);

    return CastInst::Create(Instruction::IntToPtr, Temp, DestTy);
  }

  return nullptr;
}

Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) {
  if (Opc != Instruction::BitCast)
    return nullptr;

  Type *SrcTy = C->getType();
  if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() &&
      SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) {
    LLVMContext &Context = C->getContext();

    // We have no information about target data layout, so we assume that
    // the maximum pointer size is 64bit.
    Type *MidTy = Type::getInt64Ty(Context);

    return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy),
                                     DestTy);
  }

  return nullptr;
}

/// Check the debug info version number, if it is out-dated, drop the debug
/// info. Return true if module is modified.
bool llvm::UpgradeDebugInfo(Module &M) {
  unsigned Version = getDebugMetadataVersionFromModule(M);
  if (Version == DEBUG_METADATA_VERSION) {
    bool BrokenDebugInfo = false;
    if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo))
      report_fatal_error("Broken module found, compilation aborted!");
    if (!BrokenDebugInfo)
      // Everything is ok.
      return false;
    else {
      // Diagnose malformed debug info.
      DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M);
      M.getContext().diagnose(Diag);
    }
  }
  bool Modified = StripDebugInfo(M);
  if (Modified && Version != DEBUG_METADATA_VERSION) {
    // Diagnose a version mismatch.
    DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
    M.getContext().diagnose(DiagVersion);
  }
  return Modified;
}

bool llvm::UpgradeRetainReleaseMarker(Module &M) {
  bool Changed = false;
  NamedMDNode *ModRetainReleaseMarker =
      M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker");
  if (ModRetainReleaseMarker) {
    MDNode *Op = ModRetainReleaseMarker->getOperand(0);
    if (Op) {
      MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
      if (ID) {
        SmallVector<StringRef, 4> ValueComp;
        ID->getString().split(ValueComp, "#");
        if (ValueComp.size() == 2) {
          std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
          Metadata *Ops[1] = {MDString::get(M.getContext(), NewValue)};
          ModRetainReleaseMarker->setOperand(0,
                                             MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }
  return Changed;
}

bool llvm::UpgradeModuleFlags(Module &M) {
  NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
  if (!ModFlags)
    return false;

  bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
  for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
    MDNode *Op = ModFlags->getOperand(I);
    if (Op->getNumOperands() != 3)
      continue;
    MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
    if (!ID)
      continue;
    if (ID->getString() == "Objective-C Image Info Version")
      HasObjCFlag = true;
    if (ID->getString() == "Objective-C Class Properties")
      HasClassProperties = true;
    // Upgrade PIC/PIE Module Flags. The module flag behavior for these two
    // flags was Error and is now Max.
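    // Illustrative metadata rewrite (behavior Error = 1 becomes Max = 7):
    //   !{i32 1, !"PIC Level", i32 2}  -->  !{i32 7, !"PIC Level", i32 2}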
    if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
      if (auto *Behavior =
              mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
        if (Behavior->getLimitedValue() == Module::Error) {
          Type *Int32Ty = Type::getInt32Ty(M.getContext());
          Metadata *Ops[3] = {
              ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
              MDString::get(M.getContext(), ID->getString()),
              Op->getOperand(2)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
    // Upgrade Objective-C Image Info Section. Remove the whitespace in the
    // section name so that llvm-lto will not complain about mismatching
    // module flags that are functionally the same.
    if (ID->getString() == "Objective-C Image Info Section") {
      if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
        SmallVector<StringRef, 4> ValueComp;
        Value->getString().split(ValueComp, " ");
        if (ValueComp.size() != 1) {
          std::string NewValue;
          for (auto &S : ValueComp)
            NewValue += S.str();
          Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
                              MDString::get(M.getContext(), NewValue)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }

  // "Objective-C Class Properties" is recently added for Objective-C. We
  // upgrade ObjC bitcodes to contain a "Objective-C Class Properties" module
  // flag of value 0, so we can correctly downgrade this flag when trying to
  // link an ObjC bitcode without this module flag with an ObjC bitcode with
  // this module flag.
  if (HasObjCFlag && !HasClassProperties) {
    M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties",
                    (uint32_t)0);
    Changed = true;
  }

  return Changed;
}

void llvm::UpgradeSectionAttributes(Module &M) {
  auto TrimSpaces = [](StringRef Section) -> std::string {
    SmallVector<StringRef, 5> Components;
    Section.split(Components, ',');

    SmallString<32> Buffer;
    raw_svector_ostream OS(Buffer);

    for (auto Component : Components)
      OS << ',' << Component.trim();

    return OS.str().substr(1);
  };

  for (auto &GV : M.globals()) {
    if (!GV.hasSection())
      continue;

    StringRef Section = GV.getSection();

    if (!Section.startswith("__DATA, __objc_catlist"))
      continue;

    // __DATA, __objc_catlist, regular, no_dead_strip
    // -> __DATA,__objc_catlist,regular,no_dead_strip
    GV.setSection(TrimSpaces(Section));
  }
}

static bool isOldLoopArgument(Metadata *MD) {
  auto *T = dyn_cast_or_null<MDTuple>(MD);
  if (!T)
    return false;
  if (T->getNumOperands() < 1)
    return false;
  auto *S = dyn_cast_or_null<MDString>(T->getOperand(0));
  if (!S)
    return false;
  return S->getString().startswith("llvm.vectorizer.");
}

static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) {
  StringRef OldPrefix = "llvm.vectorizer.";
  assert(OldTag.startswith(OldPrefix) && "Expected old prefix");

  // e.g. "llvm.vectorizer.width" becomes "llvm.loop.vectorize.width", with
  // "llvm.vectorizer.unroll" special-cased to the interleave-count tag.
  if (OldTag == "llvm.vectorizer.unroll")
    return MDString::get(C, "llvm.loop.interleave.count");

  return MDString::get(
      C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size()))
             .str());
}

static Metadata *upgradeLoopArgument(Metadata *MD) {
  auto *T = dyn_cast_or_null<MDTuple>(MD);
  if (!T)
    return MD;
  if (T->getNumOperands() < 1)
    return MD;
  auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0));
  if (!OldTag)
    return MD;
  if (!OldTag->getString().startswith("llvm.vectorizer."))
    return MD;

  // This has an old tag. Upgrade it.
  SmallVector<Metadata *, 8> Ops;
  Ops.reserve(T->getNumOperands());
  Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString()));
  for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I)
    Ops.push_back(T->getOperand(I));

  return MDTuple::get(T->getContext(), Ops);
}

MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) {
  auto *T = dyn_cast<MDTuple>(&N);
  if (!T)
    return &N;

  if (none_of(T->operands(), isOldLoopArgument))
    return &N;

  SmallVector<Metadata *, 8> Ops;
  Ops.reserve(T->getNumOperands());
  for (Metadata *MD : T->operands())
    Ops.push_back(upgradeLoopArgument(MD));

  return MDTuple::get(T->getContext(), Ops);
}