//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>
using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old, replace it with new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsic matches below should be marked with which llvm
  // version started autoupgrading them. At some point in the future we would
  // like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
  if (Name == "addcarryx.u32" || // Added in 8.0
      Name == "addcarryx.u64" || // Added in 8.0
      Name == "addcarry.u32" || // Added in 8.0
      Name == "addcarry.u64" || // Added in 8.0
      Name == "subborrow.u32" || // Added in 8.0
      Name == "subborrow.u64" || // Added in 8.0
      Name.startswith("sse2.padds.") || // Added in 8.0
      Name.startswith("sse2.psubs.") || // Added in 8.0
      Name.startswith("sse2.paddus.") || // Added in 8.0
      Name.startswith("sse2.psubus.") || // Added in 8.0
      Name.startswith("avx2.padds.") || // Added in 8.0
      Name.startswith("avx2.psubs.") || // Added in 8.0
      Name.startswith("avx2.paddus.") || // Added in 8.0
      Name.startswith("avx2.psubus.") || // Added in 8.0
      Name.startswith("avx512.padds.") || // Added in 8.0
      Name.startswith("avx512.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.padds.") || // Added in 8.0
      Name.startswith("avx512.mask.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.paddus.") || // Added in 8.0
      Name.startswith("avx512.mask.psubus.") || // Added in 8.0
      Name == "ssse3.pabs.b.128" || // Added in 6.0
      Name == "ssse3.pabs.w.128" || // Added in 6.0
      Name == "ssse3.pabs.d.128" || // Added in 6.0
      Name.startswith("fma4.vfmadd.s") || // Added in 7.0
      Name.startswith("fma.vfmadd.") || // Added in 7.0
      Name.startswith("fma.vfmsub.") || // Added in 7.0
      Name.startswith("fma.vfmaddsub.") || // Added in 7.0
      Name.startswith("fma.vfmsubadd.") || // Added in 7.0
      Name.startswith("fma.vfnmadd.") || // Added in 7.0
      Name.startswith("fma.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
      Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
      Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
      Name.startswith("avx512.kunpck") || // Added in 6.0
      Name.startswith("avx2.pabs.") || // Added in 6.0
      Name.startswith("avx512.mask.pabs.") || // Added in 6.0
      Name.startswith("avx512.broadcastm") || // Added in 6.0
      Name == "sse.sqrt.ss" || // Added in 7.0
      Name == "sse2.sqrt.sd" || // Added in 7.0
      Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
      Name.startswith("avx.sqrt.p") || // Added in 7.0
      Name.startswith("sse2.sqrt.p") || // Added in 7.0
      Name.startswith("sse.sqrt.p") || // Added in 7.0
      Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
      Name.startswith("sse2.pcmpeq.") || // Added in 3.1
      Name.startswith("sse2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx2.pcmpeq.") || // Added in 3.1
      Name.startswith("avx2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
      Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
      Name.startswith("avx.vperm2f128.") || // Added in 6.0
      Name == "avx2.vperm2i128" || // Added in 6.0
      Name == "sse.add.ss" || // Added in 4.0
      Name == "sse2.add.sd" || // Added in 4.0
137 Name == "sse.sub.ss" || // Added in 4.0 138 Name == "sse2.sub.sd" || // Added in 4.0 139 Name == "sse.mul.ss" || // Added in 4.0 140 Name == "sse2.mul.sd" || // Added in 4.0 141 Name == "sse.div.ss" || // Added in 4.0 142 Name == "sse2.div.sd" || // Added in 4.0 143 Name == "sse41.pmaxsb" || // Added in 3.9 144 Name == "sse2.pmaxs.w" || // Added in 3.9 145 Name == "sse41.pmaxsd" || // Added in 3.9 146 Name == "sse2.pmaxu.b" || // Added in 3.9 147 Name == "sse41.pmaxuw" || // Added in 3.9 148 Name == "sse41.pmaxud" || // Added in 3.9 149 Name == "sse41.pminsb" || // Added in 3.9 150 Name == "sse2.pmins.w" || // Added in 3.9 151 Name == "sse41.pminsd" || // Added in 3.9 152 Name == "sse2.pminu.b" || // Added in 3.9 153 Name == "sse41.pminuw" || // Added in 3.9 154 Name == "sse41.pminud" || // Added in 3.9 155 Name == "avx512.kand.w" || // Added in 7.0 156 Name == "avx512.kandn.w" || // Added in 7.0 157 Name == "avx512.knot.w" || // Added in 7.0 158 Name == "avx512.kor.w" || // Added in 7.0 159 Name == "avx512.kxor.w" || // Added in 7.0 160 Name == "avx512.kxnor.w" || // Added in 7.0 161 Name == "avx512.kortestc.w" || // Added in 7.0 162 Name == "avx512.kortestz.w" || // Added in 7.0 163 Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0 164 Name.startswith("avx2.pmax") || // Added in 3.9 165 Name.startswith("avx2.pmin") || // Added in 3.9 166 Name.startswith("avx512.mask.pmax") || // Added in 4.0 167 Name.startswith("avx512.mask.pmin") || // Added in 4.0 168 Name.startswith("avx2.vbroadcast") || // Added in 3.8 169 Name.startswith("avx2.pbroadcast") || // Added in 3.8 170 Name.startswith("avx.vpermil.") || // Added in 3.1 171 Name.startswith("sse2.pshuf") || // Added in 3.9 172 Name.startswith("avx512.pbroadcast") || // Added in 3.9 173 Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9 174 Name.startswith("avx512.mask.movddup") || // Added in 3.9 175 Name.startswith("avx512.mask.movshdup") || // Added in 3.9 176 Name.startswith("avx512.mask.movsldup") || // Added in 3.9 177 Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9 178 Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9 179 Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9 180 Name.startswith("avx512.mask.shuf.p") || // Added in 4.0 181 Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9 182 Name.startswith("avx512.mask.perm.df.") || // Added in 3.9 183 Name.startswith("avx512.mask.perm.di.") || // Added in 3.9 184 Name.startswith("avx512.mask.punpckl") || // Added in 3.9 185 Name.startswith("avx512.mask.punpckh") || // Added in 3.9 186 Name.startswith("avx512.mask.unpckl.") || // Added in 3.9 187 Name.startswith("avx512.mask.unpckh.") || // Added in 3.9 188 Name.startswith("avx512.mask.pand.") || // Added in 3.9 189 Name.startswith("avx512.mask.pandn.") || // Added in 3.9 190 Name.startswith("avx512.mask.por.") || // Added in 3.9 191 Name.startswith("avx512.mask.pxor.") || // Added in 3.9 192 Name.startswith("avx512.mask.and.") || // Added in 3.9 193 Name.startswith("avx512.mask.andn.") || // Added in 3.9 194 Name.startswith("avx512.mask.or.") || // Added in 3.9 195 Name.startswith("avx512.mask.xor.") || // Added in 3.9 196 Name.startswith("avx512.mask.padd.") || // Added in 4.0 197 Name.startswith("avx512.mask.psub.") || // Added in 4.0 198 Name.startswith("avx512.mask.pmull.") || // Added in 4.0 199 Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0 200 Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0 201 Name == "avx512.mask.cvtudq2ps.128" || // Added 
      Name == "avx512.mask.cvtudq2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvtqq2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtqq2pd.256" || // Added in 7.0
      Name == "avx512.mask.cvtuqq2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtuqq2pd.256" || // Added in 7.0
      Name == "avx512.mask.cvtdq2ps.128" || // Added in 7.0
      Name == "avx512.mask.cvtdq2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
      Name == "avx512.cvtusi2sd" || // Added in 7.0
      Name.startswith("avx512.mask.permvar.") || // Added in 7.0
      Name == "sse2.pmulu.dq" || // Added in 7.0
      Name == "sse41.pmuldq" || // Added in 7.0
      Name == "avx2.pmulu.dq" || // Added in 7.0
      Name == "avx2.pmul.dq" || // Added in 7.0
      Name == "avx512.pmulu.dq.512" || // Added in 7.0
      Name == "avx512.pmul.dq.512" || // Added in 7.0
      Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
      Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
      Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.p") || // Added in 7.0
      Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
      Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
      Name.startswith("avx512.mask.psll.d") || // Added in 4.0
      Name.startswith("avx512.mask.psll.q") || // Added in 4.0
      Name.startswith("avx512.mask.psll.w") || // Added in 4.0
      Name.startswith("avx512.mask.psra.d") || // Added in 4.0
      Name.startswith("avx512.mask.psra.q") || // Added in 4.0
      Name.startswith("avx512.mask.psra.w") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
      Name.startswith("avx512.mask.pslli") || // Added in 4.0
      Name.startswith("avx512.mask.psrai") || // Added in 4.0
      Name.startswith("avx512.mask.psrli") || // Added in 4.0
      Name.startswith("avx512.mask.psllv") || // Added in 4.0
      Name.startswith("avx512.mask.psrav") || // Added in 4.0
      Name.startswith("avx512.mask.psrlv") || // Added in 4.0
      Name.startswith("sse41.pmovsx") || // Added in 3.8
      Name.startswith("sse41.pmovzx") || // Added in 3.9
      Name.startswith("avx2.pmovsx") || // Added in 3.9
      Name.startswith("avx2.pmovzx") || // Added in 3.9
      Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
      Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
      Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
      Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
      Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
      Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
      Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
      Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
      Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
      Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
      Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
      Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
      Name.startswith("avx512.vpshld.") || // Added in 8.0
      Name.startswith("avx512.vpshrd.") || // Added in 8.0
      Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
      Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
      Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
      Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
      Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
      Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
      Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
      Name == "sse.cvtsi2ss" || // Added in 7.0
      Name == "sse.cvtsi642ss" || // Added in 7.0
      Name == "sse2.cvtsi2sd" || // Added in 7.0
      Name == "sse2.cvtsi642sd" || // Added in 7.0
      Name == "sse2.cvtss2sd" || // Added in 7.0
      Name == "sse2.cvtdq2pd" || // Added in 3.9
      Name == "sse2.cvtdq2ps" || // Added in 7.0
      Name == "sse2.cvtps2pd" || // Added in 3.9
      Name == "avx.cvtdq2.pd.256" || // Added in 3.9
      Name == "avx.cvtdq2.ps.256" || // Added in 7.0
      Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
      Name.startswith("avx.vinsertf128.") || // Added in 3.7
      Name == "avx2.vinserti128" || // Added in 3.7
      Name.startswith("avx512.mask.insert") || // Added in 4.0
      Name.startswith("avx.vextractf128.") || // Added in 3.7
      Name == "avx2.vextracti128" || // Added in 3.7
      Name.startswith("avx512.mask.vextract") || // Added in 4.0
      Name.startswith("sse4a.movnt.") || // Added in 3.9
      Name.startswith("avx.movnt.") || // Added in 3.2
      Name.startswith("avx512.storent.") || // Added in 3.9
      Name == "sse41.movntdqa" || // Added in 5.0
      Name == "avx2.movntdqa" || // Added in 5.0
      Name == "avx512.movntdqa" || // Added in 5.0
      Name == "sse2.storel.dq" || // Added in 3.9
      Name.startswith("sse.storeu.") || // Added in 3.9
      Name.startswith("sse2.storeu.") || // Added in 3.9
      Name.startswith("avx.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.store.p") || // Added in 3.9
      Name.startswith("avx512.mask.store.b.") || // Added in 3.9
      Name.startswith("avx512.mask.store.w.") || // Added in 3.9
      Name.startswith("avx512.mask.store.d.") || // Added in 3.9
      Name.startswith("avx512.mask.store.q.") || // Added in 3.9
      Name == "avx512.mask.store.ss" || // Added in 7.0
      Name.startswith("avx512.mask.loadu.") || // Added in 3.9
      Name.startswith("avx512.mask.load.") || // Added in 3.9
      Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
      Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
      Name == "sse42.crc32.64.8" || // Added in 3.4
      Name.startswith("avx.vbroadcast.s") || // Added in 3.5
      Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
      Name.startswith("avx512.mask.palignr.") || // Added in 3.9
      Name.startswith("avx512.mask.valign.") || // Added in 4.0
      Name.startswith("sse2.psll.dq") || // Added in 3.7
      Name.startswith("sse2.psrl.dq") || // Added in 3.7
      Name.startswith("avx2.psll.dq") || // Added in 3.7
      Name.startswith("avx2.psrl.dq") || // Added in 3.7
      Name.startswith("avx512.psll.dq") || // Added in 3.9
      Name.startswith("avx512.psrl.dq") || // Added in 3.9
      Name == "sse41.pblendw" || // Added in 3.7
      Name.startswith("sse41.blendp") || // Added in 3.7
      Name.startswith("avx.blend.p") || // Added in 3.7
      Name == "avx2.pblendw" || // Added in 3.7
      Name.startswith("avx2.pblendd.") || // Added in 3.7
      Name.startswith("avx.vbroadcastf128") || // Added in 4.0
      Name == "avx2.vbroadcasti128" || // Added in 3.7
Name.startswith("avx512.mask.broadcastf") || // Added in 6.0 363 Name.startswith("avx512.mask.broadcasti") || // Added in 6.0 364 Name == "xop.vpcmov" || // Added in 3.8 365 Name == "xop.vpcmov.256" || // Added in 5.0 366 Name.startswith("avx512.mask.move.s") || // Added in 4.0 367 Name.startswith("avx512.cvtmask2") || // Added in 5.0 368 Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0 369 Name.startswith("xop.vprot") || // Added in 8.0 370 Name.startswith("avx512.prol") || // Added in 8.0 371 Name.startswith("avx512.pror") || // Added in 8.0 372 Name.startswith("avx512.mask.prorv.") || // Added in 8.0 373 Name.startswith("avx512.mask.pror.") || // Added in 8.0 374 Name.startswith("avx512.mask.prolv.") || // Added in 8.0 375 Name.startswith("avx512.mask.prol.") || // Added in 8.0 376 Name.startswith("avx512.ptestm") || //Added in 6.0 377 Name.startswith("avx512.ptestnm") || //Added in 6.0 378 Name.startswith("sse2.pavg") || // Added in 6.0 379 Name.startswith("avx2.pavg") || // Added in 6.0 380 Name.startswith("avx512.mask.pavg")) // Added in 6.0 381 return true; 382 383 return false; 384 } 385 386 static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name, 387 Function *&NewFn) { 388 // Only handle intrinsics that start with "x86.". 389 if (!Name.startswith("x86.")) 390 return false; 391 // Remove "x86." prefix. 392 Name = Name.substr(4); 393 394 if (ShouldUpgradeX86Intrinsic(F, Name)) { 395 NewFn = nullptr; 396 return true; 397 } 398 399 if (Name == "rdtscp") { // Added in 8.0 400 // If this intrinsic has 0 operands, it's the new version. 401 if (F->getFunctionType()->getNumParams() == 0) 402 return false; 403 404 rename(F); 405 NewFn = Intrinsic::getDeclaration(F->getParent(), 406 Intrinsic::x86_rdtscp); 407 return true; 408 } 409 410 // SSE4.1 ptest functions may have an old signature. 411 if (Name.startswith("sse41.ptest")) { // Added in 3.2 412 if (Name.substr(11) == "c") 413 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn); 414 if (Name.substr(11) == "z") 415 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn); 416 if (Name.substr(11) == "nzc") 417 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn); 418 } 419 // Several blend and other instructions with masks used the wrong number of 420 // bits. 421 if (Name == "sse41.insertps") // Added in 3.6 422 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps, 423 NewFn); 424 if (Name == "sse41.dppd") // Added in 3.6 425 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd, 426 NewFn); 427 if (Name == "sse41.dpps") // Added in 3.6 428 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps, 429 NewFn); 430 if (Name == "sse41.mpsadbw") // Added in 3.6 431 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw, 432 NewFn); 433 if (Name == "avx.dp.ps.256") // Added in 3.6 434 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256, 435 NewFn); 436 if (Name == "avx2.mpsadbw") // Added in 3.6 437 return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw, 438 NewFn); 439 440 // frcz.ss/sd may need to have an argument dropped. 
  if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_ss);
    return true;
  }
  if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_sd);
    return true;
  }
  // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
  if (Name.startswith("xop.vpermil2")) { // Added in 3.9
    auto Idx = F->getFunctionType()->getParamType(2);
    if (Idx->isFPOrFPVectorTy()) {
      rename(F);
      unsigned IdxSize = Idx->getPrimitiveSizeInBits();
      unsigned EltSize = Idx->getScalarSizeInBits();
      Intrinsic::ID Permil2ID;
      if (EltSize == 64 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd;
      else if (EltSize == 32 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2ps;
      else if (EltSize == 64 && IdxSize == 256)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
      else
        Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
      NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
      return true;
    }
  }

  return false;
}

static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vclz")) {
      Type* args[2] = {
        F->arg_begin()->getType(),
        Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm." + Name + ".p0i8", F->getParent());
+ Name + ".p0i8", F->getParent()); 521 return true; 522 } 523 Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$"); 524 if (vstRegex.match(Name)) { 525 static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1, 526 Intrinsic::arm_neon_vst2, 527 Intrinsic::arm_neon_vst3, 528 Intrinsic::arm_neon_vst4}; 529 530 static const Intrinsic::ID StoreLaneInts[] = { 531 Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane, 532 Intrinsic::arm_neon_vst4lane 533 }; 534 535 auto fArgs = F->getFunctionType()->params(); 536 Type *Tys[] = {fArgs[0], fArgs[1]}; 537 if (Name.find("lane") == StringRef::npos) 538 NewFn = Intrinsic::getDeclaration(F->getParent(), 539 StoreInts[fArgs.size() - 3], Tys); 540 else 541 NewFn = Intrinsic::getDeclaration(F->getParent(), 542 StoreLaneInts[fArgs.size() - 5], Tys); 543 return true; 544 } 545 if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") { 546 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer); 547 return true; 548 } 549 if (Name == "x86.seh.recoverfp") { 550 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp); 551 return true; 552 } 553 break; 554 } 555 556 case 'c': { 557 if (Name.startswith("ctlz.") && F->arg_size() == 1) { 558 rename(F); 559 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, 560 F->arg_begin()->getType()); 561 return true; 562 } 563 if (Name.startswith("cttz.") && F->arg_size() == 1) { 564 rename(F); 565 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz, 566 F->arg_begin()->getType()); 567 return true; 568 } 569 break; 570 } 571 case 'd': { 572 if (Name == "dbg.value" && F->arg_size() == 4) { 573 rename(F); 574 NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value); 575 return true; 576 } 577 break; 578 } 579 case 'i': 580 case 'l': { 581 bool IsLifetimeStart = Name.startswith("lifetime.start"); 582 if (IsLifetimeStart || Name.startswith("invariant.start")) { 583 Intrinsic::ID ID = IsLifetimeStart ? 584 Intrinsic::lifetime_start : Intrinsic::invariant_start; 585 auto Args = F->getFunctionType()->params(); 586 Type* ObjectPtr[1] = {Args[1]}; 587 if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) { 588 rename(F); 589 NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr); 590 return true; 591 } 592 } 593 594 bool IsLifetimeEnd = Name.startswith("lifetime.end"); 595 if (IsLifetimeEnd || Name.startswith("invariant.end")) { 596 Intrinsic::ID ID = IsLifetimeEnd ? 597 Intrinsic::lifetime_end : Intrinsic::invariant_end; 598 599 auto Args = F->getFunctionType()->params(); 600 Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }
    if (Name.startswith("invariant.group.barrier")) {
      // Rename invariant.group.barrier to launder.invariant.group.
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[0]};
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
          Intrinsic::launder_invariant_group, ObjectPtr);
      return true;
    }

    break;
  }
  case 'm': {
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    // Rename gather/scatter intrinsics that have no address space overloading
    // to the new overload which includes an address space.
    if (Name.startswith("masked.gather.")) {
      Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_gather, Tys);
        return true;
      }
    }
    if (Name.startswith("masked.scatter.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = {Args[0], Args[1]};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_scatter, Tys);
        return true;
      }
    }
    // Update the memory intrinsics (memcpy/memmove/memset) that have an
    // alignment parameter to embed the alignment as an attribute of
    // the pointer args.
    if (Name.startswith("memcpy.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memmove.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest and len.
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
  case 'n': {
    if (Name.startswith("nvvm.")) {
      Name = Name.substr(5);

      // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
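      // For example, a one-argument call to llvm.nvvm.brev32 is re-declared
      // below as llvm.bitreverse.i32, using the old call's return type as the
      // overloaded type.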
      Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
                              .Cases("brev32", "brev64", Intrinsic::bitreverse)
                              .Case("clz.i", Intrinsic::ctlz)
                              .Case("popc.i", Intrinsic::ctpop)
                              .Default(Intrinsic::not_intrinsic);
      if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                          {F->getReturnType()});
        return true;
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
      // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = StringSwitch<bool>(Name)
                        .Cases("abs.i", "abs.ll", true)
                        .Cases("clz.ll", "popc.ll", "h2f", true)
                        .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
                        .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
                        .Default(false);
      if (Expand) {
        NewFn = nullptr;
        return true;
      }
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->arg_size() == 2 ||
          F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
                                          Tys);
        return true;
      }
    }
    break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x':
    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }
  // Remangle our intrinsic since we upgrade the mangling.
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != None) {
    NewFn = Result.getValue();
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  // Nothing to do yet.
  return false;
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
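    // For example, with a 128-bit vector (NumElts == 16) and Shift == 4 the
    // indices come out as 12,13,14,15,16,...,27: the first four positions
    // read zero bytes from Res and the rest read bytes 0..11 of Op, which is
    // a left shift by four bytes.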
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(
      Builder.getInt1Ty(), cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have fewer than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}

static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
                                  Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  llvm::VectorType *MaskTy =
      llvm::VectorType::get(Builder.getInt1Ty(),
                            Mask->getType()->getIntegerBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);
  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the
// immediate, so we need to handle both cases. VALIGN also doesn't have
// 128-bit lanes.
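// For example, a PALIGNR immediate of 20 shifts the concatenated pair one
// full byte lane plus 4, so it is rewritten below as a shift of 4 with zeroes
// shifted in, while an immediate of 32 or more simply yields an all-zero
// result.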
static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that.
  for (unsigned l = 0; l < NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}

static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallInst &CI,
                                          bool ZeroMask, bool IndexForm) {
  Type *Ty = CI.getType();
  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
  unsigned EltWidth = Ty->getScalarSizeInBits();
  bool IsFloat = Ty->isFPOrFPVectorTy();
  Intrinsic::ID IID;
  if (VecWidth == 128 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
  else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_128;
  else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
  else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_128;
  else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
  else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_256;
  else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
  else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_256;
  else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
  else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_512;
  else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
  else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_512;
  else if (VecWidth == 128 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
  else if (VecWidth == 256 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
  else if (VecWidth == 512 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
  else if (VecWidth == 128 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
  else if (VecWidth == 256 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
  else if (VecWidth == 512 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
  else
    llvm_unreachable("Unexpected intrinsic");

  Value *Args[] = { CI.getArgOperand(0), CI.getArgOperand(1),
                    CI.getArgOperand(2) };

  // If this isn't index form we need to swap operand 0 and 1.
  if (!IndexForm)
    std::swap(Args[0], Args[1]);

  Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
                                Args);
  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
                             : Builder.CreateBitCast(CI.getArgOperand(1),
                                                     Ty);
  return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
}

static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
                                            bool IsSigned, bool IsAddition) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getOperand(0);
  Value *Op1 = CI.getOperand(1);

  Intrinsic::ID IID =
      IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
               : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
                               bool IsRotateRight) {
  Type *Ty = CI.getType();
  Value *Src = CI.getArgOperand(0);
  Value *Amt = CI.getArgOperand(1);

  // Amount may be a scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallInst &CI, unsigned Imm,
                              bool IsSigned) {
  Type *Ty = CI.getType();
  Value *LHS = CI.getArgOperand(0);
  Value *RHS = CI.getArgOperand(1);

  CmpInst::Predicate Pred;
  switch (Imm) {
  case 0x0:
    Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    break;
  case 0x1:
    Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
    break;
  case 0x2:
    Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    break;
  case 0x3:
    Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
    break;
  case 0x4:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case 0x5:
    Pred = ICmpInst::ICMP_NE;
    break;
  case 0x6:
    return Constant::getNullValue(Ty); // FALSE
  case 0x7:
    return Constant::getAllOnesValue(Ty); // TRUE
  default:
    llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
  }

  Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
  Value *Ext = Builder.CreateSExt(Cmp, Ty);
  return Ext;
}

static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
                                    bool IsShiftRight, bool ZeroMask) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Amt = CI.getArgOperand(2);

  if (IsShiftRight)
    std::swap(Op0, Op1);

  // Amount may be a scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});

  unsigned NumArgs = CI.getNumArgOperands();
  if (NumArgs >= 4) { // For masked intrinsics.
    Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
                    ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
                               CI.getArgOperand(0);
    Value *Mask = CI.getOperand(NumArgs - 1);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  unsigned Align =
      Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Passthru->getType()));
  unsigned Align =
      Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
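  // For example, an i8 mask of 0x0B used with a 4-element load becomes the
  // vector <i1 1, i1 1, i1 0, i1 1>; only the low NumElts bits are used.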
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
}

static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  llvm::Type *Ty = Op0->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
  Value *Neg = Builder.CreateNeg(Op0);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);

  if (CI.getNumArgOperands() == 3)
    Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));

  return Res;
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
  Type *Ty = CI.getType();

  // Arguments have a vXi32 type so cast to vXi64.
  Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
  Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = Builder.CreateShl(LHS, ShiftAmt);
    LHS = Builder.CreateAShr(LHS, ShiftAmt);
    RHS = Builder.CreateShl(RHS, ShiftAmt);
    RHS = Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = Builder.CreateAnd(LHS, Mask);
    RHS = Builder.CreateAnd(RHS, Mask);
  }

  Value *Res = Builder.CreateMul(LHS, RHS);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

// Apply a mask to a vector of i1's and make sure the result is at least
// 8 bits wide.
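// For example, a v4i1 compare result is padded with four zero elements taken
// from the null vector to form a v8i1 before the bitcast, so the returned
// scalar is always at least an i8.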
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask) {
  unsigned NumElts = Vec->getType()->getVectorNumElements();
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Vec = Builder.CreateShuffleVector(Vec,
                                      Constant::getNullValue(Vec->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
}

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   unsigned CC, bool Signed) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ; break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE; break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
  }

  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);

  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
                                    Intrinsic::ID IID) {
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
  Value *Rep = Builder.CreateCall(Intrin,
                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}

static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value* A = CI.getArgOperand(0);
  Value* B = CI.getArgOperand(1);
  Value* Src = CI.getArgOperand(2);
  Value* Mask = CI.getArgOperand(3);

  Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
  Value* Cmp = Builder.CreateIsNotNull(AndNode);
  Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
  Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
  Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}

static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
  Value* Op = CI.getArgOperand(0);
  Type* ReturnOp = CI.getType();
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
}

// Replace intrinsic with unmasked version and a select.
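// For example, an avx512.mask.max.ps.256 call becomes a call to the unmasked
// llvm.x86.avx.max.ps.256 followed by a select between that result and the
// passthru operand, keyed on the mask operand.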
static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
                                      CallInst &CI, Value *&Rep) {
  Name = Name.substr(12); // Remove avx512.mask.

  unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
  unsigned EltWidth = CI.getType()->getScalarSizeInBits();
  Intrinsic::ID IID;
  if (Name.startswith("max.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_max_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_max_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_max_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_max_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("min.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_min_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_min_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_min_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_min_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pshuf.b.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pshuf_b_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pshuf_b;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pshuf_b_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmul.hr.sw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmul_hr_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulh.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulh_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulh_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulh_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulhu.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulhu_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulhu_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulhu_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddw.d.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmadd_wd;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_wd;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddw_d_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddubs.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_ub_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddubs_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packsswb.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_packsswb_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packsswb;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packsswb_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packssdw.")) {
1405 if (VecWidth == 128) 1406 IID = Intrinsic::x86_sse2_packssdw_128; 1407 else if (VecWidth == 256) 1408 IID = Intrinsic::x86_avx2_packssdw; 1409 else if (VecWidth == 512) 1410 IID = Intrinsic::x86_avx512_packssdw_512; 1411 else 1412 llvm_unreachable("Unexpected intrinsic"); 1413 } else if (Name.startswith("packuswb.")) { 1414 if (VecWidth == 128) 1415 IID = Intrinsic::x86_sse2_packuswb_128; 1416 else if (VecWidth == 256) 1417 IID = Intrinsic::x86_avx2_packuswb; 1418 else if (VecWidth == 512) 1419 IID = Intrinsic::x86_avx512_packuswb_512; 1420 else 1421 llvm_unreachable("Unexpected intrinsic"); 1422 } else if (Name.startswith("packusdw.")) { 1423 if (VecWidth == 128) 1424 IID = Intrinsic::x86_sse41_packusdw; 1425 else if (VecWidth == 256) 1426 IID = Intrinsic::x86_avx2_packusdw; 1427 else if (VecWidth == 512) 1428 IID = Intrinsic::x86_avx512_packusdw_512; 1429 else 1430 llvm_unreachable("Unexpected intrinsic"); 1431 } else if (Name.startswith("vpermilvar.")) { 1432 if (VecWidth == 128 && EltWidth == 32) 1433 IID = Intrinsic::x86_avx_vpermilvar_ps; 1434 else if (VecWidth == 128 && EltWidth == 64) 1435 IID = Intrinsic::x86_avx_vpermilvar_pd; 1436 else if (VecWidth == 256 && EltWidth == 32) 1437 IID = Intrinsic::x86_avx_vpermilvar_ps_256; 1438 else if (VecWidth == 256 && EltWidth == 64) 1439 IID = Intrinsic::x86_avx_vpermilvar_pd_256; 1440 else if (VecWidth == 512 && EltWidth == 32) 1441 IID = Intrinsic::x86_avx512_vpermilvar_ps_512; 1442 else if (VecWidth == 512 && EltWidth == 64) 1443 IID = Intrinsic::x86_avx512_vpermilvar_pd_512; 1444 else 1445 llvm_unreachable("Unexpected intrinsic"); 1446 } else if (Name == "cvtpd2dq.256") { 1447 IID = Intrinsic::x86_avx_cvt_pd2dq_256; 1448 } else if (Name == "cvtpd2ps.256") { 1449 IID = Intrinsic::x86_avx_cvt_pd2_ps_256; 1450 } else if (Name == "cvttpd2dq.256") { 1451 IID = Intrinsic::x86_avx_cvtt_pd2dq_256; 1452 } else if (Name == "cvttps2dq.128") { 1453 IID = Intrinsic::x86_sse2_cvttps2dq; 1454 } else if (Name == "cvttps2dq.256") { 1455 IID = Intrinsic::x86_avx_cvtt_ps2dq_256; 1456 } else if (Name.startswith("permvar.")) { 1457 bool IsFloat = CI.getType()->isFPOrFPVectorTy(); 1458 if (VecWidth == 256 && EltWidth == 32 && IsFloat) 1459 IID = Intrinsic::x86_avx2_permps; 1460 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat) 1461 IID = Intrinsic::x86_avx2_permd; 1462 else if (VecWidth == 256 && EltWidth == 64 && IsFloat) 1463 IID = Intrinsic::x86_avx512_permvar_df_256; 1464 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat) 1465 IID = Intrinsic::x86_avx512_permvar_di_256; 1466 else if (VecWidth == 512 && EltWidth == 32 && IsFloat) 1467 IID = Intrinsic::x86_avx512_permvar_sf_512; 1468 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat) 1469 IID = Intrinsic::x86_avx512_permvar_si_512; 1470 else if (VecWidth == 512 && EltWidth == 64 && IsFloat) 1471 IID = Intrinsic::x86_avx512_permvar_df_512; 1472 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat) 1473 IID = Intrinsic::x86_avx512_permvar_di_512; 1474 else if (VecWidth == 128 && EltWidth == 16) 1475 IID = Intrinsic::x86_avx512_permvar_hi_128; 1476 else if (VecWidth == 256 && EltWidth == 16) 1477 IID = Intrinsic::x86_avx512_permvar_hi_256; 1478 else if (VecWidth == 512 && EltWidth == 16) 1479 IID = Intrinsic::x86_avx512_permvar_hi_512; 1480 else if (VecWidth == 128 && EltWidth == 8) 1481 IID = Intrinsic::x86_avx512_permvar_qi_128; 1482 else if (VecWidth == 256 && EltWidth == 8) 1483 IID = Intrinsic::x86_avx512_permvar_qi_256; 1484 else if (VecWidth == 512 && EltWidth == 8) 1485 IID = 
Intrinsic::x86_avx512_permvar_qi_512; 1486 else 1487 llvm_unreachable("Unexpected intrinsic"); 1488 } else if (Name.startswith("dbpsadbw.")) { 1489 if (VecWidth == 128) 1490 IID = Intrinsic::x86_avx512_dbpsadbw_128; 1491 else if (VecWidth == 256) 1492 IID = Intrinsic::x86_avx512_dbpsadbw_256; 1493 else if (VecWidth == 512) 1494 IID = Intrinsic::x86_avx512_dbpsadbw_512; 1495 else 1496 llvm_unreachable("Unexpected intrinsic"); 1497 } else if (Name.startswith("pmultishift.qb.")) { 1498 if (VecWidth == 128) 1499 IID = Intrinsic::x86_avx512_pmultishift_qb_128; 1500 else if (VecWidth == 256) 1501 IID = Intrinsic::x86_avx512_pmultishift_qb_256; 1502 else if (VecWidth == 512) 1503 IID = Intrinsic::x86_avx512_pmultishift_qb_512; 1504 else 1505 llvm_unreachable("Unexpected intrinsic"); 1506 } else 1507 return false; 1508 1509 SmallVector<Value *, 4> Args(CI.arg_operands().begin(), 1510 CI.arg_operands().end()); 1511 Args.pop_back(); 1512 Args.pop_back(); 1513 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID), 1514 Args); 1515 unsigned NumArgs = CI.getNumArgOperands(); 1516 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep, 1517 CI.getArgOperand(NumArgs - 2)); 1518 return true; 1519 } 1520 1521 /// Upgrade comment in call to inline asm that represents an objc retain release 1522 /// marker. 1523 void llvm::UpgradeInlineAsmString(std::string *AsmStr) { 1524 size_t Pos; 1525 if (AsmStr->find("mov\tfp") == 0 && 1526 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos && 1527 (Pos = AsmStr->find("# marker")) != std::string::npos) { 1528 AsmStr->replace(Pos, 1, ";"); 1529 } 1530 return; 1531 } 1532 1533 /// Upgrade a call to an old intrinsic. All argument and return casting must be 1534 /// provided to seamlessly integrate with existing context. 1535 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { 1536 Function *F = CI->getCalledFunction(); 1537 LLVMContext &C = CI->getContext(); 1538 IRBuilder<> Builder(C); 1539 Builder.SetInsertPoint(CI->getParent(), CI->getIterator()); 1540 1541 assert(F && "Intrinsic call is not direct?"); 1542 1543 if (!NewFn) { 1544 // Get the Function's name. 1545 StringRef Name = F->getName(); 1546 1547 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'"); 1548 Name = Name.substr(5); 1549 1550 bool IsX86 = Name.startswith("x86."); 1551 if (IsX86) 1552 Name = Name.substr(4); 1553 bool IsNVVM = Name.startswith("nvvm."); 1554 if (IsNVVM) 1555 Name = Name.substr(5); 1556 1557 if (IsX86 && Name.startswith("sse4a.movnt.")) { 1558 Module *M = F->getParent(); 1559 SmallVector<Metadata *, 1> Elts; 1560 Elts.push_back( 1561 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1562 MDNode *Node = MDNode::get(C, Elts); 1563 1564 Value *Arg0 = CI->getArgOperand(0); 1565 Value *Arg1 = CI->getArgOperand(1); 1566 1567 // Nontemporal (unaligned) store of the 0'th element of the float/double 1568 // vector. 1569 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType(); 1570 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy); 1571 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast"); 1572 Value *Extract = 1573 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement"); 1574 1575 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1); 1576 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1577 1578 // Remove intrinsic. 
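      // The old call produced no value (it was a store), so once the
      // replacement store is in place the original call can simply be erased.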
1579 CI->eraseFromParent(); 1580 return; 1581 } 1582 1583 if (IsX86 && (Name.startswith("avx.movnt.") || 1584 Name.startswith("avx512.storent."))) { 1585 Module *M = F->getParent(); 1586 SmallVector<Metadata *, 1> Elts; 1587 Elts.push_back( 1588 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1589 MDNode *Node = MDNode::get(C, Elts); 1590 1591 Value *Arg0 = CI->getArgOperand(0); 1592 Value *Arg1 = CI->getArgOperand(1); 1593 1594 // Convert the type of the pointer to a pointer to the stored type. 1595 Value *BC = Builder.CreateBitCast(Arg0, 1596 PointerType::getUnqual(Arg1->getType()), 1597 "cast"); 1598 VectorType *VTy = cast<VectorType>(Arg1->getType()); 1599 StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, 1600 VTy->getBitWidth() / 8); 1601 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1602 1603 // Remove intrinsic. 1604 CI->eraseFromParent(); 1605 return; 1606 } 1607 1608 if (IsX86 && Name == "sse2.storel.dq") { 1609 Value *Arg0 = CI->getArgOperand(0); 1610 Value *Arg1 = CI->getArgOperand(1); 1611 1612 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 1613 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 1614 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0); 1615 Value *BC = Builder.CreateBitCast(Arg0, 1616 PointerType::getUnqual(Elt->getType()), 1617 "cast"); 1618 Builder.CreateAlignedStore(Elt, BC, 1); 1619 1620 // Remove intrinsic. 1621 CI->eraseFromParent(); 1622 return; 1623 } 1624 1625 if (IsX86 && (Name.startswith("sse.storeu.") || 1626 Name.startswith("sse2.storeu.") || 1627 Name.startswith("avx.storeu."))) { 1628 Value *Arg0 = CI->getArgOperand(0); 1629 Value *Arg1 = CI->getArgOperand(1); 1630 1631 Arg0 = Builder.CreateBitCast(Arg0, 1632 PointerType::getUnqual(Arg1->getType()), 1633 "cast"); 1634 Builder.CreateAlignedStore(Arg1, Arg0, 1); 1635 1636 // Remove intrinsic. 1637 CI->eraseFromParent(); 1638 return; 1639 } 1640 1641 if (IsX86 && Name == "avx512.mask.store.ss") { 1642 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1)); 1643 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1), 1644 Mask, false); 1645 1646 // Remove intrinsic. 1647 CI->eraseFromParent(); 1648 return; 1649 } 1650 1651 if (IsX86 && (Name.startswith("avx512.mask.store"))) { 1652 // "avx512.mask.storeu." or "avx512.mask.store." 1653 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu". 1654 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1), 1655 CI->getArgOperand(2), Aligned); 1656 1657 // Remove intrinsic. 1658 CI->eraseFromParent(); 1659 return; 1660 } 1661 1662 Value *Rep; 1663 // Upgrade packed integer vector compare intrinsics to compare instructions. 1664 if (IsX86 && (Name.startswith("sse2.pcmp") || 1665 Name.startswith("avx2.pcmp"))) { 1666 // "sse2.pcpmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt." 1667 bool CmpEq = Name[9] == 'e'; 1668 Rep = Builder.CreateICmp(CmpEq ? 
ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT, 1669 CI->getArgOperand(0), CI->getArgOperand(1)); 1670 Rep = Builder.CreateSExt(Rep, CI->getType(), ""); 1671 } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) { 1672 Type *ExtTy = Type::getInt32Ty(C); 1673 if (CI->getOperand(0)->getType()->isIntegerTy(8)) 1674 ExtTy = Type::getInt64Ty(C); 1675 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() / 1676 ExtTy->getPrimitiveSizeInBits(); 1677 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy); 1678 Rep = Builder.CreateVectorSplat(NumElts, Rep); 1679 } else if (IsX86 && (Name == "sse.sqrt.ss" || 1680 Name == "sse2.sqrt.sd")) { 1681 Value *Vec = CI->getArgOperand(0); 1682 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0); 1683 Function *Intr = Intrinsic::getDeclaration(F->getParent(), 1684 Intrinsic::sqrt, Elt0->getType()); 1685 Elt0 = Builder.CreateCall(Intr, Elt0); 1686 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0); 1687 } else if (IsX86 && (Name.startswith("avx.sqrt.p") || 1688 Name.startswith("sse2.sqrt.p") || 1689 Name.startswith("sse.sqrt.p"))) { 1690 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 1691 Intrinsic::sqrt, 1692 CI->getType()), 1693 {CI->getArgOperand(0)}); 1694 } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) { 1695 if (CI->getNumArgOperands() == 4 && 1696 (!isa<ConstantInt>(CI->getArgOperand(3)) || 1697 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) { 1698 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512 1699 : Intrinsic::x86_avx512_sqrt_pd_512; 1700 1701 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) }; 1702 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 1703 IID), Args); 1704 } else { 1705 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 1706 Intrinsic::sqrt, 1707 CI->getType()), 1708 {CI->getArgOperand(0)}); 1709 } 1710 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1711 CI->getArgOperand(1)); 1712 } else if (IsX86 && (Name.startswith("avx512.ptestm") || 1713 Name.startswith("avx512.ptestnm"))) { 1714 Value *Op0 = CI->getArgOperand(0); 1715 Value *Op1 = CI->getArgOperand(1); 1716 Value *Mask = CI->getArgOperand(2); 1717 Rep = Builder.CreateAnd(Op0, Op1); 1718 llvm::Type *Ty = Op0->getType(); 1719 Value *Zero = llvm::Constant::getNullValue(Ty); 1720 ICmpInst::Predicate Pred = 1721 Name.startswith("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ; 1722 Rep = Builder.CreateICmp(Pred, Rep, Zero); 1723 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask); 1724 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){ 1725 unsigned NumElts = 1726 CI->getArgOperand(1)->getType()->getVectorNumElements(); 1727 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0)); 1728 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1729 CI->getArgOperand(1)); 1730 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) { 1731 unsigned NumElts = CI->getType()->getScalarSizeInBits(); 1732 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts); 1733 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts); 1734 uint32_t Indices[64]; 1735 for (unsigned i = 0; i != NumElts; ++i) 1736 Indices[i] = i; 1737 1738 // First extract half of each vector. This gives better codegen than 1739 // doing it in a single shuffle. 
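      // (Only the low NumElts / 2 bits of each source mask end up in the
      // result, so each operand is narrowed first and the two halves are
      // concatenated afterwards.)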
1740 LHS = Builder.CreateShuffleVector(LHS, LHS, 1741 makeArrayRef(Indices, NumElts / 2)); 1742 RHS = Builder.CreateShuffleVector(RHS, RHS, 1743 makeArrayRef(Indices, NumElts / 2)); 1744 // Concat the vectors. 1745 // NOTE: Operands have to be swapped to match intrinsic definition. 1746 Rep = Builder.CreateShuffleVector(RHS, LHS, 1747 makeArrayRef(Indices, NumElts)); 1748 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1749 } else if (IsX86 && Name == "avx512.kand.w") { 1750 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1751 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1752 Rep = Builder.CreateAnd(LHS, RHS); 1753 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1754 } else if (IsX86 && Name == "avx512.kandn.w") { 1755 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1756 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1757 LHS = Builder.CreateNot(LHS); 1758 Rep = Builder.CreateAnd(LHS, RHS); 1759 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1760 } else if (IsX86 && Name == "avx512.kor.w") { 1761 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1762 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1763 Rep = Builder.CreateOr(LHS, RHS); 1764 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1765 } else if (IsX86 && Name == "avx512.kxor.w") { 1766 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1767 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1768 Rep = Builder.CreateXor(LHS, RHS); 1769 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1770 } else if (IsX86 && Name == "avx512.kxnor.w") { 1771 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1772 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1773 LHS = Builder.CreateNot(LHS); 1774 Rep = Builder.CreateXor(LHS, RHS); 1775 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1776 } else if (IsX86 && Name == "avx512.knot.w") { 1777 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1778 Rep = Builder.CreateNot(Rep); 1779 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1780 } else if (IsX86 && 1781 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) { 1782 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1783 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1784 Rep = Builder.CreateOr(LHS, RHS); 1785 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty()); 1786 Value *C; 1787 if (Name[14] == 'c') 1788 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty()); 1789 else 1790 C = ConstantInt::getNullValue(Builder.getInt16Ty()); 1791 Rep = Builder.CreateICmpEQ(Rep, C); 1792 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty()); 1793 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" || 1794 Name == "sse.sub.ss" || Name == "sse2.sub.sd" || 1795 Name == "sse.mul.ss" || Name == "sse2.mul.sd" || 1796 Name == "sse.div.ss" || Name == "sse2.div.sd")) { 1797 Type *I32Ty = Type::getInt32Ty(C); 1798 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1799 ConstantInt::get(I32Ty, 0)); 1800 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1801 ConstantInt::get(I32Ty, 0)); 1802 Value *EltOp; 1803 if (Name.contains(".add.")) 1804 EltOp = Builder.CreateFAdd(Elt0, Elt1); 1805 else if (Name.contains(".sub.")) 1806 EltOp = Builder.CreateFSub(Elt0, Elt1); 1807 else if (Name.contains(".mul.")) 1808 EltOp = Builder.CreateFMul(Elt0, Elt1); 1809 else 1810 EltOp = Builder.CreateFDiv(Elt0, Elt1); 1811 Rep = 
Builder.CreateInsertElement(CI->getArgOperand(0), EltOp, 1812 ConstantInt::get(I32Ty, 0)); 1813 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) { 1814 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt." 1815 bool CmpEq = Name[16] == 'e'; 1816 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true); 1817 } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) { 1818 Type *OpTy = CI->getArgOperand(0)->getType(); 1819 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1820 Intrinsic::ID IID; 1821 switch (VecWidth) { 1822 default: llvm_unreachable("Unexpected intrinsic"); 1823 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break; 1824 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break; 1825 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break; 1826 } 1827 1828 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1829 { CI->getOperand(0), CI->getArgOperand(1) }); 1830 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1831 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) { 1832 Type *OpTy = CI->getArgOperand(0)->getType(); 1833 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1834 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1835 Intrinsic::ID IID; 1836 if (VecWidth == 128 && EltWidth == 32) 1837 IID = Intrinsic::x86_avx512_fpclass_ps_128; 1838 else if (VecWidth == 256 && EltWidth == 32) 1839 IID = Intrinsic::x86_avx512_fpclass_ps_256; 1840 else if (VecWidth == 512 && EltWidth == 32) 1841 IID = Intrinsic::x86_avx512_fpclass_ps_512; 1842 else if (VecWidth == 128 && EltWidth == 64) 1843 IID = Intrinsic::x86_avx512_fpclass_pd_128; 1844 else if (VecWidth == 256 && EltWidth == 64) 1845 IID = Intrinsic::x86_avx512_fpclass_pd_256; 1846 else if (VecWidth == 512 && EltWidth == 64) 1847 IID = Intrinsic::x86_avx512_fpclass_pd_512; 1848 else 1849 llvm_unreachable("Unexpected intrinsic"); 1850 1851 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1852 { CI->getOperand(0), CI->getArgOperand(1) }); 1853 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1854 } else if (IsX86 && Name.startswith("avx512.mask.cmp.p")) { 1855 Type *OpTy = CI->getArgOperand(0)->getType(); 1856 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1857 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1858 Intrinsic::ID IID; 1859 if (VecWidth == 128 && EltWidth == 32) 1860 IID = Intrinsic::x86_avx512_cmp_ps_128; 1861 else if (VecWidth == 256 && EltWidth == 32) 1862 IID = Intrinsic::x86_avx512_cmp_ps_256; 1863 else if (VecWidth == 512 && EltWidth == 32) 1864 IID = Intrinsic::x86_avx512_cmp_ps_512; 1865 else if (VecWidth == 128 && EltWidth == 64) 1866 IID = Intrinsic::x86_avx512_cmp_pd_128; 1867 else if (VecWidth == 256 && EltWidth == 64) 1868 IID = Intrinsic::x86_avx512_cmp_pd_256; 1869 else if (VecWidth == 512 && EltWidth == 64) 1870 IID = Intrinsic::x86_avx512_cmp_pd_512; 1871 else 1872 llvm_unreachable("Unexpected intrinsic"); 1873 1874 SmallVector<Value *, 4> Args; 1875 Args.push_back(CI->getArgOperand(0)); 1876 Args.push_back(CI->getArgOperand(1)); 1877 Args.push_back(CI->getArgOperand(2)); 1878 if (CI->getNumArgOperands() == 5) 1879 Args.push_back(CI->getArgOperand(4)); 1880 1881 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1882 Args); 1883 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(3)); 1884 } else if (IsX86 && Name.startswith("avx512.mask.cmp.") && 1885 Name[16] != 'p') { 1886 // Integer compare intrinsics. 
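      // The immediate selects the predicate (0=eq, 1=lt, 2=le, 3=false, 4=ne,
      // 5=ge, 6=gt, 7=true); avx512.mask.cmp is the signed form and
      // avx512.mask.ucmp below is the unsigned form.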
1887 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1888 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true); 1889 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) { 1890 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1891 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false); 1892 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") || 1893 Name.startswith("avx512.cvtw2mask.") || 1894 Name.startswith("avx512.cvtd2mask.") || 1895 Name.startswith("avx512.cvtq2mask."))) { 1896 Value *Op = CI->getArgOperand(0); 1897 Value *Zero = llvm::Constant::getNullValue(Op->getType()); 1898 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero); 1899 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr); 1900 } else if(IsX86 && (Name == "ssse3.pabs.b.128" || 1901 Name == "ssse3.pabs.w.128" || 1902 Name == "ssse3.pabs.d.128" || 1903 Name.startswith("avx2.pabs") || 1904 Name.startswith("avx512.mask.pabs"))) { 1905 Rep = upgradeAbs(Builder, *CI); 1906 } else if (IsX86 && (Name == "sse41.pmaxsb" || 1907 Name == "sse2.pmaxs.w" || 1908 Name == "sse41.pmaxsd" || 1909 Name.startswith("avx2.pmaxs") || 1910 Name.startswith("avx512.mask.pmaxs"))) { 1911 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT); 1912 } else if (IsX86 && (Name == "sse2.pmaxu.b" || 1913 Name == "sse41.pmaxuw" || 1914 Name == "sse41.pmaxud" || 1915 Name.startswith("avx2.pmaxu") || 1916 Name.startswith("avx512.mask.pmaxu"))) { 1917 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT); 1918 } else if (IsX86 && (Name == "sse41.pminsb" || 1919 Name == "sse2.pmins.w" || 1920 Name == "sse41.pminsd" || 1921 Name.startswith("avx2.pmins") || 1922 Name.startswith("avx512.mask.pmins"))) { 1923 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT); 1924 } else if (IsX86 && (Name == "sse2.pminu.b" || 1925 Name == "sse41.pminuw" || 1926 Name == "sse41.pminud" || 1927 Name.startswith("avx2.pminu") || 1928 Name.startswith("avx512.mask.pminu"))) { 1929 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT); 1930 } else if (IsX86 && (Name == "sse2.pmulu.dq" || 1931 Name == "avx2.pmulu.dq" || 1932 Name == "avx512.pmulu.dq.512" || 1933 Name.startswith("avx512.mask.pmulu.dq."))) { 1934 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false); 1935 } else if (IsX86 && (Name == "sse41.pmuldq" || 1936 Name == "avx2.pmul.dq" || 1937 Name == "avx512.pmul.dq.512" || 1938 Name.startswith("avx512.mask.pmul.dq."))) { 1939 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true); 1940 } else if (IsX86 && (Name == "sse.cvtsi2ss" || 1941 Name == "sse2.cvtsi2sd" || 1942 Name == "sse.cvtsi642ss" || 1943 Name == "sse2.cvtsi642sd")) { 1944 Rep = Builder.CreateSIToFP(CI->getArgOperand(1), 1945 CI->getType()->getVectorElementType()); 1946 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1947 } else if (IsX86 && Name == "avx512.cvtusi2sd") { 1948 Rep = Builder.CreateUIToFP(CI->getArgOperand(1), 1949 CI->getType()->getVectorElementType()); 1950 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1951 } else if (IsX86 && Name == "sse2.cvtss2sd") { 1952 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0); 1953 Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType()); 1954 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1955 } else if (IsX86 && (Name == "sse2.cvtdq2pd" || 1956 Name == "sse2.cvtdq2ps" || 1957 Name == "avx.cvtdq2.pd.256" || 1958 Name == "avx.cvtdq2.ps.256" || 1959 Name.startswith("avx512.mask.cvtdq2pd.") 
|| 1960 Name.startswith("avx512.mask.cvtudq2pd.") || 1961 Name == "avx512.mask.cvtdq2ps.128" || 1962 Name == "avx512.mask.cvtdq2ps.256" || 1963 Name == "avx512.mask.cvtudq2ps.128" || 1964 Name == "avx512.mask.cvtudq2ps.256" || 1965 Name == "avx512.mask.cvtqq2pd.128" || 1966 Name == "avx512.mask.cvtqq2pd.256" || 1967 Name == "avx512.mask.cvtuqq2pd.128" || 1968 Name == "avx512.mask.cvtuqq2pd.256" || 1969 Name == "sse2.cvtps2pd" || 1970 Name == "avx.cvt.ps2.pd.256" || 1971 Name == "avx512.mask.cvtps2pd.128" || 1972 Name == "avx512.mask.cvtps2pd.256")) { 1973 Type *DstTy = CI->getType(); 1974 Rep = CI->getArgOperand(0); 1975 1976 unsigned NumDstElts = DstTy->getVectorNumElements(); 1977 if (NumDstElts < Rep->getType()->getVectorNumElements()) { 1978 assert(NumDstElts == 2 && "Unexpected vector size"); 1979 uint32_t ShuffleMask[2] = { 0, 1 }; 1980 Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); 1981 } 1982 1983 bool IsPS2PD = (StringRef::npos != Name.find("ps2")); 1984 bool IsUnsigned = (StringRef::npos != Name.find("cvtu")); 1985 if (IsPS2PD) 1986 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd"); 1987 else if (IsUnsigned) 1988 Rep = Builder.CreateUIToFP(Rep, DstTy, "cvt"); 1989 else 1990 Rep = Builder.CreateSIToFP(Rep, DstTy, "cvt"); 1991 1992 if (CI->getNumArgOperands() == 3) 1993 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1994 CI->getArgOperand(1)); 1995 } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) { 1996 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 1997 CI->getArgOperand(1), CI->getArgOperand(2), 1998 /*Aligned*/false); 1999 } else if (IsX86 && (Name.startswith("avx512.mask.load."))) { 2000 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 2001 CI->getArgOperand(1),CI->getArgOperand(2), 2002 /*Aligned*/true); 2003 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) { 2004 Type *ResultTy = CI->getType(); 2005 Type *PtrTy = ResultTy->getVectorElementType(); 2006 2007 // Cast the pointer to element type. 2008 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2009 llvm::PointerType::getUnqual(PtrTy)); 2010 2011 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2012 ResultTy->getVectorNumElements()); 2013 2014 Function *ELd = Intrinsic::getDeclaration(F->getParent(), 2015 Intrinsic::masked_expandload, 2016 ResultTy); 2017 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) }); 2018 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) { 2019 Type *ResultTy = CI->getArgOperand(1)->getType(); 2020 Type *PtrTy = ResultTy->getVectorElementType(); 2021 2022 // Cast the pointer to element type. 
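      // masked_compressstore expects a pointer to the scalar element type
      // rather than to the vector type, with the mask widened to a vector of
      // i1 values.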
2023 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2024 llvm::PointerType::getUnqual(PtrTy)); 2025 2026 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2027 ResultTy->getVectorNumElements()); 2028 2029 Function *CSt = Intrinsic::getDeclaration(F->getParent(), 2030 Intrinsic::masked_compressstore, 2031 ResultTy); 2032 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec }); 2033 } else if (IsX86 && Name.startswith("xop.vpcom")) { 2034 bool IsSigned; 2035 if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") || 2036 Name.endswith("uq")) 2037 IsSigned = false; 2038 else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") || 2039 Name.endswith("q")) 2040 IsSigned = true; 2041 else 2042 llvm_unreachable("Unknown suffix"); 2043 2044 unsigned Imm; 2045 if (CI->getNumArgOperands() == 3) { 2046 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2047 } else { 2048 Name = Name.substr(9); // strip off "xop.vpcom" 2049 if (Name.startswith("lt")) 2050 Imm = 0; 2051 else if (Name.startswith("le")) 2052 Imm = 1; 2053 else if (Name.startswith("gt")) 2054 Imm = 2; 2055 else if (Name.startswith("ge")) 2056 Imm = 3; 2057 else if (Name.startswith("eq")) 2058 Imm = 4; 2059 else if (Name.startswith("ne")) 2060 Imm = 5; 2061 else if (Name.startswith("false")) 2062 Imm = 6; 2063 else if (Name.startswith("true")) 2064 Imm = 7; 2065 else 2066 llvm_unreachable("Unknown condition"); 2067 } 2068 2069 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned); 2070 } else if (IsX86 && Name.startswith("xop.vpcmov")) { 2071 Value *Sel = CI->getArgOperand(2); 2072 Value *NotSel = Builder.CreateNot(Sel); 2073 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel); 2074 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel); 2075 Rep = Builder.CreateOr(Sel0, Sel1); 2076 } else if (IsX86 && (Name.startswith("xop.vprot") || 2077 Name.startswith("avx512.prol") || 2078 Name.startswith("avx512.mask.prol"))) { 2079 Rep = upgradeX86Rotate(Builder, *CI, false); 2080 } else if (IsX86 && (Name.startswith("avx512.pror") || 2081 Name.startswith("avx512.mask.pror"))) { 2082 Rep = upgradeX86Rotate(Builder, *CI, true); 2083 } else if (IsX86 && (Name.startswith("avx512.vpshld.") || 2084 Name.startswith("avx512.mask.vpshld") || 2085 Name.startswith("avx512.maskz.vpshld"))) { 2086 bool ZeroMask = Name[11] == 'z'; 2087 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask); 2088 } else if (IsX86 && (Name.startswith("avx512.vpshrd.") || 2089 Name.startswith("avx512.mask.vpshrd") || 2090 Name.startswith("avx512.maskz.vpshrd"))) { 2091 bool ZeroMask = Name[11] == 'z'; 2092 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask); 2093 } else if (IsX86 && Name == "sse42.crc32.64.8") { 2094 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(), 2095 Intrinsic::x86_sse42_crc32_32_8); 2096 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C)); 2097 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)}); 2098 Rep = Builder.CreateZExt(Rep, CI->getType(), ""); 2099 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") || 2100 Name.startswith("avx512.vbroadcast.s"))) { 2101 // Replace broadcasts with a series of insertelements. 
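      // The old intrinsic took a pointer to the scalar, so load it once and
      // then insert the loaded value into every lane of an undef vector.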
2102 Type *VecTy = CI->getType(); 2103 Type *EltTy = VecTy->getVectorElementType(); 2104 unsigned EltNum = VecTy->getVectorNumElements(); 2105 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0), 2106 EltTy->getPointerTo()); 2107 Value *Load = Builder.CreateLoad(EltTy, Cast); 2108 Type *I32Ty = Type::getInt32Ty(C); 2109 Rep = UndefValue::get(VecTy); 2110 for (unsigned I = 0; I < EltNum; ++I) 2111 Rep = Builder.CreateInsertElement(Rep, Load, 2112 ConstantInt::get(I32Ty, I)); 2113 } else if (IsX86 && (Name.startswith("sse41.pmovsx") || 2114 Name.startswith("sse41.pmovzx") || 2115 Name.startswith("avx2.pmovsx") || 2116 Name.startswith("avx2.pmovzx") || 2117 Name.startswith("avx512.mask.pmovsx") || 2118 Name.startswith("avx512.mask.pmovzx"))) { 2119 VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType()); 2120 VectorType *DstTy = cast<VectorType>(CI->getType()); 2121 unsigned NumDstElts = DstTy->getNumElements(); 2122 2123 // Extract a subvector of the first NumDstElts lanes and sign/zero extend. 2124 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 2125 for (unsigned i = 0; i != NumDstElts; ++i) 2126 ShuffleMask[i] = i; 2127 2128 Value *SV = Builder.CreateShuffleVector( 2129 CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask); 2130 2131 bool DoSext = (StringRef::npos != Name.find("pmovsx")); 2132 Rep = DoSext ? Builder.CreateSExt(SV, DstTy) 2133 : Builder.CreateZExt(SV, DstTy); 2134 // If there are 3 arguments, it's a masked intrinsic so we need a select. 2135 if (CI->getNumArgOperands() == 3) 2136 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2137 CI->getArgOperand(1)); 2138 } else if (Name == "avx512.mask.pmov.qd.256" || 2139 Name == "avx512.mask.pmov.qd.512" || 2140 Name == "avx512.mask.pmov.wb.256" || 2141 Name == "avx512.mask.pmov.wb.512") { 2142 Type *Ty = CI->getArgOperand(1)->getType(); 2143 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty); 2144 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2145 CI->getArgOperand(1)); 2146 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") || 2147 Name == "avx2.vbroadcasti128")) { 2148 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle. 2149 Type *EltTy = CI->getType()->getVectorElementType(); 2150 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits(); 2151 Type *VT = VectorType::get(EltTy, NumSrcElts); 2152 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0), 2153 PointerType::getUnqual(VT)); 2154 Value *Load = Builder.CreateAlignedLoad(Op, 1); 2155 if (NumSrcElts == 2) 2156 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2157 { 0, 1, 0, 1 }); 2158 else 2159 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2160 { 0, 1, 2, 3, 0, 1, 2, 3 }); 2161 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") || 2162 Name.startswith("avx512.mask.shuf.f"))) { 2163 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2164 Type *VT = CI->getType(); 2165 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128; 2166 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits(); 2167 unsigned ControlBitsMask = NumLanes - 1; 2168 unsigned NumControlBits = NumLanes / 2; 2169 SmallVector<uint32_t, 8> ShuffleMask(0); 2170 2171 for (unsigned l = 0; l != NumLanes; ++l) { 2172 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask; 2173 // We actually need the other source. 
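        // (Lanes in the upper half of the destination come from the second
        // source, so bias the lane number past the end of the first operand's
        // lanes; the per-element indices below then land in operand 1.)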
2174 if (l >= NumLanes / 2) 2175 LaneMask += NumLanes; 2176 for (unsigned i = 0; i != NumElementsInLane; ++i) 2177 ShuffleMask.push_back(LaneMask * NumElementsInLane + i); 2178 } 2179 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 2180 CI->getArgOperand(1), ShuffleMask); 2181 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2182 CI->getArgOperand(3)); 2183 }else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") || 2184 Name.startswith("avx512.mask.broadcasti"))) { 2185 unsigned NumSrcElts = 2186 CI->getArgOperand(0)->getType()->getVectorNumElements(); 2187 unsigned NumDstElts = CI->getType()->getVectorNumElements(); 2188 2189 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 2190 for (unsigned i = 0; i != NumDstElts; ++i) 2191 ShuffleMask[i] = i % NumSrcElts; 2192 2193 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 2194 CI->getArgOperand(0), 2195 ShuffleMask); 2196 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2197 CI->getArgOperand(1)); 2198 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") || 2199 Name.startswith("avx2.vbroadcast") || 2200 Name.startswith("avx512.pbroadcast") || 2201 Name.startswith("avx512.mask.broadcast.s"))) { 2202 // Replace vp?broadcasts with a vector shuffle. 2203 Value *Op = CI->getArgOperand(0); 2204 unsigned NumElts = CI->getType()->getVectorNumElements(); 2205 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts); 2206 Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()), 2207 Constant::getNullValue(MaskTy)); 2208 2209 if (CI->getNumArgOperands() == 3) 2210 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2211 CI->getArgOperand(1)); 2212 } else if (IsX86 && (Name.startswith("sse2.padds.") || 2213 Name.startswith("sse2.psubs.") || 2214 Name.startswith("avx2.padds.") || 2215 Name.startswith("avx2.psubs.") || 2216 Name.startswith("avx512.padds.") || 2217 Name.startswith("avx512.psubs.") || 2218 Name.startswith("avx512.mask.padds.") || 2219 Name.startswith("avx512.mask.psubs."))) { 2220 bool IsAdd = Name.contains(".padds"); 2221 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, true, IsAdd); 2222 } else if (IsX86 && (Name.startswith("sse2.paddus.") || 2223 Name.startswith("sse2.psubus.") || 2224 Name.startswith("avx2.paddus.") || 2225 Name.startswith("avx2.psubus.") || 2226 Name.startswith("avx512.mask.paddus.") || 2227 Name.startswith("avx512.mask.psubus."))) { 2228 bool IsAdd = Name.contains(".paddus"); 2229 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, false, IsAdd); 2230 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) { 2231 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2232 CI->getArgOperand(1), 2233 CI->getArgOperand(2), 2234 CI->getArgOperand(3), 2235 CI->getArgOperand(4), 2236 false); 2237 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) { 2238 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2239 CI->getArgOperand(1), 2240 CI->getArgOperand(2), 2241 CI->getArgOperand(3), 2242 CI->getArgOperand(4), 2243 true); 2244 } else if (IsX86 && (Name == "sse2.psll.dq" || 2245 Name == "avx2.psll.dq")) { 2246 // 128/256-bit shift left specified in bits. 2247 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2248 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), 2249 Shift / 8); // Shift is in bits. 2250 } else if (IsX86 && (Name == "sse2.psrl.dq" || 2251 Name == "avx2.psrl.dq")) { 2252 // 128/256-bit shift right specified in bits. 
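      // UpgradeX86PSRLDQIntrinsics takes a byte count, so the bit count from
      // the immediate is converted below.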
2253       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2254       Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
2255                                        Shift / 8); // Shift is in bits.
2256     } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
2257                          Name == "avx2.psll.dq.bs" ||
2258                          Name == "avx512.psll.dq.512")) {
2259       // 128/256/512-bit shift left specified in bytes.
2260       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2261       Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2262     } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
2263                          Name == "avx2.psrl.dq.bs" ||
2264                          Name == "avx512.psrl.dq.512")) {
2265       // 128/256/512-bit shift right specified in bytes.
2266       unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2267       Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2268     } else if (IsX86 && (Name == "sse41.pblendw" ||
2269                          Name.startswith("sse41.blendp") ||
2270                          Name.startswith("avx.blend.p") ||
2271                          Name == "avx2.pblendw" ||
2272                          Name.startswith("avx2.pblendd."))) {
2273       Value *Op0 = CI->getArgOperand(0);
2274       Value *Op1 = CI->getArgOperand(1);
2275       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2276       VectorType *VecTy = cast<VectorType>(CI->getType());
2277       unsigned NumElts = VecTy->getNumElements();
2278
2279       SmallVector<uint32_t, 16> Idxs(NumElts);
2280       for (unsigned i = 0; i != NumElts; ++i)
2281         Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
2282
2283       Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2284     } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
2285                          Name == "avx2.vinserti128" ||
2286                          Name.startswith("avx512.mask.insert"))) {
2287       Value *Op0 = CI->getArgOperand(0);
2288       Value *Op1 = CI->getArgOperand(1);
2289       unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2290       unsigned DstNumElts = CI->getType()->getVectorNumElements();
2291       unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
2292       unsigned Scale = DstNumElts / SrcNumElts;
2293
2294       // Mask off the high bits of the immediate value; hardware ignores those.
2295       Imm = Imm % Scale;
2296
2297       // Extend the second operand into a vector the size of the destination.
2298       Value *UndefV = UndefValue::get(Op1->getType());
2299       SmallVector<uint32_t, 8> Idxs(DstNumElts);
2300       for (unsigned i = 0; i != SrcNumElts; ++i)
2301         Idxs[i] = i;
2302       for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2303         Idxs[i] = SrcNumElts;
2304       Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);
2305
2306       // Insert the second operand into the first operand.
2307
2308       // Note that there is no guarantee that instruction lowering will actually
2309       // produce a vinsertf128 instruction for the created shuffles. In
2310       // particular, the 0 immediate case involves no lane changes, so it can
2311       // be handled as a blend.
2312
2313       // Example of shuffle mask for 32-bit elements:
2314       // Imm = 1  <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2315       // Imm = 0  <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
2316
2317       // First fill with identity mask.
2318       for (unsigned i = 0; i != DstNumElts; ++i)
2319         Idxs[i] = i;
2320       // Then replace the elements where we need to insert.
2321       for (unsigned i = 0; i != SrcNumElts; ++i)
2322         Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2323       Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2324
2325       // If the intrinsic has a mask operand, handle that.
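      // (Only the avx512.mask.insert forms carry the extra passthru and mask
      // operands; the plain AVX/AVX2 forms have just three arguments.)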
2326 if (CI->getNumArgOperands() == 5) 2327 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2328 CI->getArgOperand(3)); 2329 } else if (IsX86 && (Name.startswith("avx.vextractf128.") || 2330 Name == "avx2.vextracti128" || 2331 Name.startswith("avx512.mask.vextract"))) { 2332 Value *Op0 = CI->getArgOperand(0); 2333 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2334 unsigned DstNumElts = CI->getType()->getVectorNumElements(); 2335 unsigned SrcNumElts = Op0->getType()->getVectorNumElements(); 2336 unsigned Scale = SrcNumElts / DstNumElts; 2337 2338 // Mask off the high bits of the immediate value; hardware ignores those. 2339 Imm = Imm % Scale; 2340 2341 // Get indexes for the subvector of the input vector. 2342 SmallVector<uint32_t, 8> Idxs(DstNumElts); 2343 for (unsigned i = 0; i != DstNumElts; ++i) { 2344 Idxs[i] = i + (Imm * DstNumElts); 2345 } 2346 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2347 2348 // If the intrinsic has a mask operand, handle that. 2349 if (CI->getNumArgOperands() == 4) 2350 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2351 CI->getArgOperand(2)); 2352 } else if (!IsX86 && Name == "stackprotectorcheck") { 2353 Rep = nullptr; 2354 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") || 2355 Name.startswith("avx512.mask.perm.di."))) { 2356 Value *Op0 = CI->getArgOperand(0); 2357 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2358 VectorType *VecTy = cast<VectorType>(CI->getType()); 2359 unsigned NumElts = VecTy->getNumElements(); 2360 2361 SmallVector<uint32_t, 8> Idxs(NumElts); 2362 for (unsigned i = 0; i != NumElts; ++i) 2363 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3); 2364 2365 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2366 2367 if (CI->getNumArgOperands() == 4) 2368 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2369 CI->getArgOperand(2)); 2370 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") || 2371 Name == "avx2.vperm2i128")) { 2372 // The immediate permute control byte looks like this: 2373 // [1:0] - select 128 bits from sources for low half of destination 2374 // [2] - ignore 2375 // [3] - zero low half of destination 2376 // [5:4] - select 128 bits from sources for high half of destination 2377 // [6] - ignore 2378 // [7] - zero high half of destination 2379 2380 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2381 2382 unsigned NumElts = CI->getType()->getVectorNumElements(); 2383 unsigned HalfSize = NumElts / 2; 2384 SmallVector<uint32_t, 8> ShuffleMask(NumElts); 2385 2386 // Determine which operand(s) are actually in use for this instruction. 2387 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2388 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2389 2390 // If needed, replace operands based on zero mask. 2391 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0; 2392 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1; 2393 2394 // Permute low half of result. 2395 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0; 2396 for (unsigned i = 0; i < HalfSize; ++i) 2397 ShuffleMask[i] = StartIndex + i; 2398 2399 // Permute high half of result. 2400 StartIndex = (Imm & 0x10) ? 
HalfSize : 0; 2401 for (unsigned i = 0; i < HalfSize; ++i) 2402 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i; 2403 2404 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2405 2406 } else if (IsX86 && (Name.startswith("avx.vpermil.") || 2407 Name == "sse2.pshuf.d" || 2408 Name.startswith("avx512.mask.vpermil.p") || 2409 Name.startswith("avx512.mask.pshuf.d."))) { 2410 Value *Op0 = CI->getArgOperand(0); 2411 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2412 VectorType *VecTy = cast<VectorType>(CI->getType()); 2413 unsigned NumElts = VecTy->getNumElements(); 2414 // Calculate the size of each index in the immediate. 2415 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits(); 2416 unsigned IdxMask = ((1 << IdxSize) - 1); 2417 2418 SmallVector<uint32_t, 8> Idxs(NumElts); 2419 // Lookup the bits for this element, wrapping around the immediate every 2420 // 8-bits. Elements are grouped into sets of 2 or 4 elements so we need 2421 // to offset by the first index of each group. 2422 for (unsigned i = 0; i != NumElts; ++i) 2423 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask); 2424 2425 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2426 2427 if (CI->getNumArgOperands() == 4) 2428 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2429 CI->getArgOperand(2)); 2430 } else if (IsX86 && (Name == "sse2.pshufl.w" || 2431 Name.startswith("avx512.mask.pshufl.w."))) { 2432 Value *Op0 = CI->getArgOperand(0); 2433 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2434 unsigned NumElts = CI->getType()->getVectorNumElements(); 2435 2436 SmallVector<uint32_t, 16> Idxs(NumElts); 2437 for (unsigned l = 0; l != NumElts; l += 8) { 2438 for (unsigned i = 0; i != 4; ++i) 2439 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l; 2440 for (unsigned i = 4; i != 8; ++i) 2441 Idxs[i + l] = i + l; 2442 } 2443 2444 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2445 2446 if (CI->getNumArgOperands() == 4) 2447 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2448 CI->getArgOperand(2)); 2449 } else if (IsX86 && (Name == "sse2.pshufh.w" || 2450 Name.startswith("avx512.mask.pshufh.w."))) { 2451 Value *Op0 = CI->getArgOperand(0); 2452 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2453 unsigned NumElts = CI->getType()->getVectorNumElements(); 2454 2455 SmallVector<uint32_t, 16> Idxs(NumElts); 2456 for (unsigned l = 0; l != NumElts; l += 8) { 2457 for (unsigned i = 0; i != 4; ++i) 2458 Idxs[i + l] = i + l; 2459 for (unsigned i = 0; i != 4; ++i) 2460 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l; 2461 } 2462 2463 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2464 2465 if (CI->getNumArgOperands() == 4) 2466 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2467 CI->getArgOperand(2)); 2468 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) { 2469 Value *Op0 = CI->getArgOperand(0); 2470 Value *Op1 = CI->getArgOperand(1); 2471 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2472 unsigned NumElts = CI->getType()->getVectorNumElements(); 2473 2474 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2475 unsigned HalfLaneElts = NumLaneElts / 2; 2476 2477 SmallVector<uint32_t, 16> Idxs(NumElts); 2478 for (unsigned i = 0; i != NumElts; ++i) { 2479 // Base index is the starting element of the lane. 2480 Idxs[i] = i - (i % NumLaneElts); 2481 // If we are half way through the lane switch to the other source. 
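        // (Indices of NumElts or above select from the second shuffle
        // operand.)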
2482 if ((i % NumLaneElts) >= HalfLaneElts) 2483 Idxs[i] += NumElts; 2484 // Now select the specific element. By adding HalfLaneElts bits from 2485 // the immediate. Wrapping around the immediate every 8-bits. 2486 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1); 2487 } 2488 2489 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2490 2491 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2492 CI->getArgOperand(3)); 2493 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") || 2494 Name.startswith("avx512.mask.movshdup") || 2495 Name.startswith("avx512.mask.movsldup"))) { 2496 Value *Op0 = CI->getArgOperand(0); 2497 unsigned NumElts = CI->getType()->getVectorNumElements(); 2498 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2499 2500 unsigned Offset = 0; 2501 if (Name.startswith("avx512.mask.movshdup.")) 2502 Offset = 1; 2503 2504 SmallVector<uint32_t, 16> Idxs(NumElts); 2505 for (unsigned l = 0; l != NumElts; l += NumLaneElts) 2506 for (unsigned i = 0; i != NumLaneElts; i += 2) { 2507 Idxs[i + l + 0] = i + l + Offset; 2508 Idxs[i + l + 1] = i + l + Offset; 2509 } 2510 2511 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2512 2513 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2514 CI->getArgOperand(1)); 2515 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") || 2516 Name.startswith("avx512.mask.unpckl."))) { 2517 Value *Op0 = CI->getArgOperand(0); 2518 Value *Op1 = CI->getArgOperand(1); 2519 int NumElts = CI->getType()->getVectorNumElements(); 2520 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2521 2522 SmallVector<uint32_t, 64> Idxs(NumElts); 2523 for (int l = 0; l != NumElts; l += NumLaneElts) 2524 for (int i = 0; i != NumLaneElts; ++i) 2525 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2); 2526 2527 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2528 2529 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2530 CI->getArgOperand(2)); 2531 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") || 2532 Name.startswith("avx512.mask.unpckh."))) { 2533 Value *Op0 = CI->getArgOperand(0); 2534 Value *Op1 = CI->getArgOperand(1); 2535 int NumElts = CI->getType()->getVectorNumElements(); 2536 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2537 2538 SmallVector<uint32_t, 64> Idxs(NumElts); 2539 for (int l = 0; l != NumElts; l += NumLaneElts) 2540 for (int i = 0; i != NumLaneElts; ++i) 2541 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2); 2542 2543 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2544 2545 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2546 CI->getArgOperand(2)); 2547 } else if (IsX86 && (Name.startswith("avx512.mask.and.") || 2548 Name.startswith("avx512.mask.pand."))) { 2549 VectorType *FTy = cast<VectorType>(CI->getType()); 2550 VectorType *ITy = VectorType::getInteger(FTy); 2551 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2552 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2553 Rep = Builder.CreateBitCast(Rep, FTy); 2554 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2555 CI->getArgOperand(2)); 2556 } else if (IsX86 && (Name.startswith("avx512.mask.andn.") || 2557 Name.startswith("avx512.mask.pandn."))) { 2558 VectorType *FTy = cast<VectorType>(CI->getType()); 2559 VectorType *ITy = VectorType::getInteger(FTy); 2560 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy)); 2561 Rep = Builder.CreateAnd(Rep, 2562 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2563 Rep = 
Builder.CreateBitCast(Rep, FTy); 2564 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2565 CI->getArgOperand(2)); 2566 } else if (IsX86 && (Name.startswith("avx512.mask.or.") || 2567 Name.startswith("avx512.mask.por."))) { 2568 VectorType *FTy = cast<VectorType>(CI->getType()); 2569 VectorType *ITy = VectorType::getInteger(FTy); 2570 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2571 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2572 Rep = Builder.CreateBitCast(Rep, FTy); 2573 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2574 CI->getArgOperand(2)); 2575 } else if (IsX86 && (Name.startswith("avx512.mask.xor.") || 2576 Name.startswith("avx512.mask.pxor."))) { 2577 VectorType *FTy = cast<VectorType>(CI->getType()); 2578 VectorType *ITy = VectorType::getInteger(FTy); 2579 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2580 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2581 Rep = Builder.CreateBitCast(Rep, FTy); 2582 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2583 CI->getArgOperand(2)); 2584 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) { 2585 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2586 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2587 CI->getArgOperand(2)); 2588 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) { 2589 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2590 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2591 CI->getArgOperand(2)); 2592 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) { 2593 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2594 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2595 CI->getArgOperand(2)); 2596 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) { 2597 if (Name.endswith(".512")) { 2598 Intrinsic::ID IID; 2599 if (Name[17] == 's') 2600 IID = Intrinsic::x86_avx512_add_ps_512; 2601 else 2602 IID = Intrinsic::x86_avx512_add_pd_512; 2603 2604 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2605 { CI->getArgOperand(0), CI->getArgOperand(1), 2606 CI->getArgOperand(4) }); 2607 } else { 2608 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2609 } 2610 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2611 CI->getArgOperand(2)); 2612 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) { 2613 if (Name.endswith(".512")) { 2614 Intrinsic::ID IID; 2615 if (Name[17] == 's') 2616 IID = Intrinsic::x86_avx512_div_ps_512; 2617 else 2618 IID = Intrinsic::x86_avx512_div_pd_512; 2619 2620 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2621 { CI->getArgOperand(0), CI->getArgOperand(1), 2622 CI->getArgOperand(4) }); 2623 } else { 2624 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1)); 2625 } 2626 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2627 CI->getArgOperand(2)); 2628 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) { 2629 if (Name.endswith(".512")) { 2630 Intrinsic::ID IID; 2631 if (Name[17] == 's') 2632 IID = Intrinsic::x86_avx512_mul_ps_512; 2633 else 2634 IID = Intrinsic::x86_avx512_mul_pd_512; 2635 2636 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2637 { CI->getArgOperand(0), CI->getArgOperand(1), 2638 CI->getArgOperand(4) }); 2639 } else { 2640 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2641 } 2642 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2643 
CI->getArgOperand(2)); 2644 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) { 2645 if (Name.endswith(".512")) { 2646 Intrinsic::ID IID; 2647 if (Name[17] == 's') 2648 IID = Intrinsic::x86_avx512_sub_ps_512; 2649 else 2650 IID = Intrinsic::x86_avx512_sub_pd_512; 2651 2652 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2653 { CI->getArgOperand(0), CI->getArgOperand(1), 2654 CI->getArgOperand(4) }); 2655 } else { 2656 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2657 } 2658 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2659 CI->getArgOperand(2)); 2660 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") || 2661 Name.startswith("avx512.mask.min.p")) && 2662 Name.drop_front(18) == ".512") { 2663 bool IsDouble = Name[17] == 'd'; 2664 bool IsMin = Name[13] == 'i'; 2665 static const Intrinsic::ID MinMaxTbl[2][2] = { 2666 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 }, 2667 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 } 2668 }; 2669 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble]; 2670 2671 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2672 { CI->getArgOperand(0), CI->getArgOperand(1), 2673 CI->getArgOperand(4) }); 2674 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2675 CI->getArgOperand(2)); 2676 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) { 2677 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 2678 Intrinsic::ctlz, 2679 CI->getType()), 2680 { CI->getArgOperand(0), Builder.getInt1(false) }); 2681 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2682 CI->getArgOperand(1)); 2683 } else if (IsX86 && Name.startswith("avx512.mask.psll")) { 2684 bool IsImmediate = Name[16] == 'i' || 2685 (Name.size() > 18 && Name[18] == 'i'); 2686 bool IsVariable = Name[16] == 'v'; 2687 char Size = Name[16] == '.' ? Name[17] : 2688 Name[17] == '.' ? Name[18] : 2689 Name[18] == '.' ? Name[19] : 2690 Name[20]; 2691 2692 Intrinsic::ID IID; 2693 if (IsVariable && Name[17] != '.') { 2694 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di 2695 IID = Intrinsic::x86_avx2_psllv_q; 2696 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di 2697 IID = Intrinsic::x86_avx2_psllv_q_256; 2698 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si 2699 IID = Intrinsic::x86_avx2_psllv_d; 2700 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si 2701 IID = Intrinsic::x86_avx2_psllv_d_256; 2702 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi 2703 IID = Intrinsic::x86_avx512_psllv_w_128; 2704 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi 2705 IID = Intrinsic::x86_avx512_psllv_w_256; 2706 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi 2707 IID = Intrinsic::x86_avx512_psllv_w_512; 2708 else 2709 llvm_unreachable("Unexpected size"); 2710 } else if (Name.endswith(".128")) { 2711 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128 2712 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d 2713 : Intrinsic::x86_sse2_psll_d; 2714 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128 2715 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q 2716 : Intrinsic::x86_sse2_psll_q; 2717 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128 2718 IID = IsImmediate ? 
Intrinsic::x86_sse2_pslli_w 2719 : Intrinsic::x86_sse2_psll_w; 2720 else 2721 llvm_unreachable("Unexpected size"); 2722 } else if (Name.endswith(".256")) { 2723 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256 2724 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d 2725 : Intrinsic::x86_avx2_psll_d; 2726 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256 2727 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q 2728 : Intrinsic::x86_avx2_psll_q; 2729 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256 2730 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w 2731 : Intrinsic::x86_avx2_psll_w; 2732 else 2733 llvm_unreachable("Unexpected size"); 2734 } else { 2735 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512 2736 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 : 2737 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 : 2738 Intrinsic::x86_avx512_psll_d_512; 2739 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512 2740 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 : 2741 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 : 2742 Intrinsic::x86_avx512_psll_q_512; 2743 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w 2744 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512 2745 : Intrinsic::x86_avx512_psll_w_512; 2746 else 2747 llvm_unreachable("Unexpected size"); 2748 } 2749 2750 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2751 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) { 2752 bool IsImmediate = Name[16] == 'i' || 2753 (Name.size() > 18 && Name[18] == 'i'); 2754 bool IsVariable = Name[16] == 'v'; 2755 char Size = Name[16] == '.' ? Name[17] : 2756 Name[17] == '.' ? Name[18] : 2757 Name[18] == '.' ? Name[19] : 2758 Name[20]; 2759 2760 Intrinsic::ID IID; 2761 if (IsVariable && Name[17] != '.') { 2762 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di 2763 IID = Intrinsic::x86_avx2_psrlv_q; 2764 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di 2765 IID = Intrinsic::x86_avx2_psrlv_q_256; 2766 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si 2767 IID = Intrinsic::x86_avx2_psrlv_d; 2768 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si 2769 IID = Intrinsic::x86_avx2_psrlv_d_256; 2770 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi 2771 IID = Intrinsic::x86_avx512_psrlv_w_128; 2772 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi 2773 IID = Intrinsic::x86_avx512_psrlv_w_256; 2774 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi 2775 IID = Intrinsic::x86_avx512_psrlv_w_512; 2776 else 2777 llvm_unreachable("Unexpected size"); 2778 } else if (Name.endswith(".128")) { 2779 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128 2780 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d 2781 : Intrinsic::x86_sse2_psrl_d; 2782 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128 2783 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q 2784 : Intrinsic::x86_sse2_psrl_q; 2785 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128 2786 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w 2787 : Intrinsic::x86_sse2_psrl_w; 2788 else 2789 llvm_unreachable("Unexpected size"); 2790 } else if (Name.endswith(".256")) { 2791 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256 2792 IID = IsImmediate ? 
Intrinsic::x86_avx2_psrli_d 2793 : Intrinsic::x86_avx2_psrl_d; 2794 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256 2795 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q 2796 : Intrinsic::x86_avx2_psrl_q; 2797 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256 2798 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w 2799 : Intrinsic::x86_avx2_psrl_w; 2800 else 2801 llvm_unreachable("Unexpected size"); 2802 } else { 2803 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512 2804 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 : 2805 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 : 2806 Intrinsic::x86_avx512_psrl_d_512; 2807 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512 2808 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 : 2809 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 : 2810 Intrinsic::x86_avx512_psrl_q_512; 2811 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w) 2812 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512 2813 : Intrinsic::x86_avx512_psrl_w_512; 2814 else 2815 llvm_unreachable("Unexpected size"); 2816 } 2817 2818 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2819 } else if (IsX86 && Name.startswith("avx512.mask.psra")) { 2820 bool IsImmediate = Name[16] == 'i' || 2821 (Name.size() > 18 && Name[18] == 'i'); 2822 bool IsVariable = Name[16] == 'v'; 2823 char Size = Name[16] == '.' ? Name[17] : 2824 Name[17] == '.' ? Name[18] : 2825 Name[18] == '.' ? Name[19] : 2826 Name[20]; 2827 2828 Intrinsic::ID IID; 2829 if (IsVariable && Name[17] != '.') { 2830 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si 2831 IID = Intrinsic::x86_avx2_psrav_d; 2832 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si 2833 IID = Intrinsic::x86_avx2_psrav_d_256; 2834 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi 2835 IID = Intrinsic::x86_avx512_psrav_w_128; 2836 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi 2837 IID = Intrinsic::x86_avx512_psrav_w_256; 2838 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi 2839 IID = Intrinsic::x86_avx512_psrav_w_512; 2840 else 2841 llvm_unreachable("Unexpected size"); 2842 } else if (Name.endswith(".128")) { 2843 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128 2844 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d 2845 : Intrinsic::x86_sse2_psra_d; 2846 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128 2847 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 : 2848 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 : 2849 Intrinsic::x86_avx512_psra_q_128; 2850 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128 2851 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w 2852 : Intrinsic::x86_sse2_psra_w; 2853 else 2854 llvm_unreachable("Unexpected size"); 2855 } else if (Name.endswith(".256")) { 2856 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256 2857 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d 2858 : Intrinsic::x86_avx2_psra_d; 2859 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256 2860 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 : 2861 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 : 2862 Intrinsic::x86_avx512_psra_q_256; 2863 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256 2864 IID = IsImmediate ? 
Intrinsic::x86_avx2_psrai_w 2865 : Intrinsic::x86_avx2_psra_w; 2866 else 2867 llvm_unreachable("Unexpected size"); 2868 } else { 2869 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512 2870 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 : 2871 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 : 2872 Intrinsic::x86_avx512_psra_d_512; 2873 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q 2874 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 : 2875 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 : 2876 Intrinsic::x86_avx512_psra_q_512; 2877 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w 2878 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512 2879 : Intrinsic::x86_avx512_psra_w_512; 2880 else 2881 llvm_unreachable("Unexpected size"); 2882 } 2883 2884 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2885 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) { 2886 Rep = upgradeMaskedMove(Builder, *CI); 2887 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) { 2888 Rep = UpgradeMaskToInt(Builder, *CI); 2889 } else if (IsX86 && Name.endswith(".movntdqa")) { 2890 Module *M = F->getParent(); 2891 MDNode *Node = MDNode::get( 2892 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 2893 2894 Value *Ptr = CI->getArgOperand(0); 2895 VectorType *VTy = cast<VectorType>(CI->getType()); 2896 2897 // Convert the type of the pointer to a pointer to the stored type. 2898 Value *BC = 2899 Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast"); 2900 LoadInst *LI = Builder.CreateAlignedLoad(BC, VTy->getBitWidth() / 8); 2901 LI->setMetadata(M->getMDKindID("nontemporal"), Node); 2902 Rep = LI; 2903 } else if (IsX86 && 2904 (Name.startswith("sse2.pavg") || Name.startswith("avx2.pavg") || 2905 Name.startswith("avx512.mask.pavg"))) { 2906 // llvm.x86.sse2.pavg.b/w, llvm.x86.avx2.pavg.b/w, 2907 // llvm.x86.avx512.mask.pavg.b/w 2908 Value *A = CI->getArgOperand(0); 2909 Value *B = CI->getArgOperand(1); 2910 VectorType *ZextType = VectorType::getExtendedElementVectorType( 2911 cast<VectorType>(A->getType())); 2912 Value *ExtendedA = Builder.CreateZExt(A, ZextType); 2913 Value *ExtendedB = Builder.CreateZExt(B, ZextType); 2914 Value *Sum = Builder.CreateAdd(ExtendedA, ExtendedB); 2915 Value *AddOne = Builder.CreateAdd(Sum, ConstantInt::get(ZextType, 1)); 2916 Value *ShiftR = Builder.CreateLShr(AddOne, ConstantInt::get(ZextType, 1)); 2917 Rep = Builder.CreateTrunc(ShiftR, A->getType()); 2918 if (CI->getNumArgOperands() > 2) { 2919 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2920 CI->getArgOperand(2)); 2921 } 2922 } else if (IsX86 && (Name.startswith("fma.vfmadd.") || 2923 Name.startswith("fma.vfmsub.") || 2924 Name.startswith("fma.vfnmadd.") || 2925 Name.startswith("fma.vfnmsub."))) { 2926 bool NegMul = Name[6] == 'n'; 2927 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's'; 2928 bool IsScalar = NegMul ? 
Name[12] == 's' : Name[11] == 's'; 2929 2930 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 2931 CI->getArgOperand(2) }; 2932 2933 if (IsScalar) { 2934 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 2935 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 2936 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 2937 } 2938 2939 if (NegMul && !IsScalar) 2940 Ops[0] = Builder.CreateFNeg(Ops[0]); 2941 if (NegMul && IsScalar) 2942 Ops[1] = Builder.CreateFNeg(Ops[1]); 2943 if (NegAcc) 2944 Ops[2] = Builder.CreateFNeg(Ops[2]); 2945 2946 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 2947 Intrinsic::fma, 2948 Ops[0]->getType()), 2949 Ops); 2950 2951 if (IsScalar) 2952 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, 2953 (uint64_t)0); 2954 } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) { 2955 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 2956 CI->getArgOperand(2) }; 2957 2958 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 2959 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 2960 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 2961 2962 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 2963 Intrinsic::fma, 2964 Ops[0]->getType()), 2965 Ops); 2966 2967 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()), 2968 Rep, (uint64_t)0); 2969 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") || 2970 Name.startswith("avx512.maskz.vfmadd.s") || 2971 Name.startswith("avx512.mask3.vfmadd.s") || 2972 Name.startswith("avx512.mask3.vfmsub.s") || 2973 Name.startswith("avx512.mask3.vfnmsub.s"))) { 2974 bool IsMask3 = Name[11] == '3'; 2975 bool IsMaskZ = Name[11] == 'z'; 2976 // Drop the "avx512.mask." to make it easier. 2977 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 2978 bool NegMul = Name[2] == 'n'; 2979 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's'; 2980 2981 Value *A = CI->getArgOperand(0); 2982 Value *B = CI->getArgOperand(1); 2983 Value *C = CI->getArgOperand(2); 2984 2985 if (NegMul && (IsMask3 || IsMaskZ)) 2986 A = Builder.CreateFNeg(A); 2987 if (NegMul && !(IsMask3 || IsMaskZ)) 2988 B = Builder.CreateFNeg(B); 2989 if (NegAcc) 2990 C = Builder.CreateFNeg(C); 2991 2992 A = Builder.CreateExtractElement(A, (uint64_t)0); 2993 B = Builder.CreateExtractElement(B, (uint64_t)0); 2994 C = Builder.CreateExtractElement(C, (uint64_t)0); 2995 2996 if (!isa<ConstantInt>(CI->getArgOperand(4)) || 2997 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) { 2998 Value *Ops[] = { A, B, C, CI->getArgOperand(4) }; 2999 3000 Intrinsic::ID IID; 3001 if (Name.back() == 'd') 3002 IID = Intrinsic::x86_avx512_vfmadd_f64; 3003 else 3004 IID = Intrinsic::x86_avx512_vfmadd_f32; 3005 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID); 3006 Rep = Builder.CreateCall(FMA, Ops); 3007 } else { 3008 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 3009 Intrinsic::fma, 3010 A->getType()); 3011 Rep = Builder.CreateCall(FMA, { A, B, C }); 3012 } 3013 3014 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) : 3015 IsMask3 ? C : A; 3016 3017 // For Mask3 with NegAcc, we need to create a new extractelement that 3018 // avoids the negation above. 
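    // (Illustrative: for a name matched by the mask3.vfnmsub.s branch, C was
    // negated above to feed the FMA, but a masked-off lane must still receive
    // the original, un-negated C element, so it is re-extracted from the
    // untouched operand here.)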
3019 if (NegAcc && IsMask3) 3020 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2), 3021 (uint64_t)0); 3022 3023 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3), 3024 Rep, PassThru); 3025 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0), 3026 Rep, (uint64_t)0); 3027 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") || 3028 Name.startswith("avx512.mask.vfnmadd.p") || 3029 Name.startswith("avx512.mask.vfnmsub.p") || 3030 Name.startswith("avx512.mask3.vfmadd.p") || 3031 Name.startswith("avx512.mask3.vfmsub.p") || 3032 Name.startswith("avx512.mask3.vfnmsub.p") || 3033 Name.startswith("avx512.maskz.vfmadd.p"))) { 3034 bool IsMask3 = Name[11] == '3'; 3035 bool IsMaskZ = Name[11] == 'z'; 3036 // Drop the "avx512.mask." to make it easier. 3037 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 3038 bool NegMul = Name[2] == 'n'; 3039 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's'; 3040 3041 Value *A = CI->getArgOperand(0); 3042 Value *B = CI->getArgOperand(1); 3043 Value *C = CI->getArgOperand(2); 3044 3045 if (NegMul && (IsMask3 || IsMaskZ)) 3046 A = Builder.CreateFNeg(A); 3047 if (NegMul && !(IsMask3 || IsMaskZ)) 3048 B = Builder.CreateFNeg(B); 3049 if (NegAcc) 3050 C = Builder.CreateFNeg(C); 3051 3052 if (CI->getNumArgOperands() == 5 && 3053 (!isa<ConstantInt>(CI->getArgOperand(4)) || 3054 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) { 3055 Intrinsic::ID IID; 3056 // Check the character before ".512" in string. 3057 if (Name[Name.size()-5] == 's') 3058 IID = Intrinsic::x86_avx512_vfmadd_ps_512; 3059 else 3060 IID = Intrinsic::x86_avx512_vfmadd_pd_512; 3061 3062 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 3063 { A, B, C, CI->getArgOperand(4) }); 3064 } else { 3065 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 3066 Intrinsic::fma, 3067 A->getType()); 3068 Rep = Builder.CreateCall(FMA, { A, B, C }); 3069 } 3070 3071 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) : 3072 IsMask3 ? CI->getArgOperand(2) : 3073 CI->getArgOperand(0); 3074 3075 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3076 } else if (IsX86 && (Name.startswith("fma.vfmaddsub.p") || 3077 Name.startswith("fma.vfmsubadd.p"))) { 3078 bool IsSubAdd = Name[7] == 's'; 3079 int NumElts = CI->getType()->getVectorNumElements(); 3080 3081 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3082 CI->getArgOperand(2) }; 3083 3084 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma, 3085 Ops[0]->getType()); 3086 Value *Odd = Builder.CreateCall(FMA, Ops); 3087 Ops[2] = Builder.CreateFNeg(Ops[2]); 3088 Value *Even = Builder.CreateCall(FMA, Ops); 3089 3090 if (IsSubAdd) 3091 std::swap(Even, Odd); 3092 3093 SmallVector<uint32_t, 32> Idxs(NumElts); 3094 for (int i = 0; i != NumElts; ++i) 3095 Idxs[i] = i + (i % 2) * NumElts; 3096 3097 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs); 3098 } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") || 3099 Name.startswith("avx512.mask3.vfmaddsub.p") || 3100 Name.startswith("avx512.maskz.vfmaddsub.p") || 3101 Name.startswith("avx512.mask3.vfmsubadd.p"))) { 3102 bool IsMask3 = Name[11] == '3'; 3103 bool IsMaskZ = Name[11] == 'z'; 3104 // Drop the "avx512.mask." to make it easier. 3105 Name = Name.drop_front(IsMask3 || IsMaskZ ? 
13 : 12);
    bool IsSubAdd = Name[3] == 's';
    if (CI->getNumArgOperands() == 5 &&
        (!isa<ConstantInt>(CI->getArgOperand(4)) ||
         cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
      Intrinsic::ID IID;
      // Check the character before ".512" in string.
      if (Name[Name.size()-5] == 's')
        IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
      else
        IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;

      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2), CI->getArgOperand(4) };
      if (IsSubAdd)
        Ops[2] = Builder.CreateFNeg(Ops[2]);

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                               Ops);
    } else {
      int NumElts = CI->getType()->getVectorNumElements();

      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2) };

      Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
                                                Ops[0]->getType());
      Value *Odd = Builder.CreateCall(FMA, Ops);
      Ops[2] = Builder.CreateFNeg(Ops[2]);
      Value *Even = Builder.CreateCall(FMA, Ops);

      if (IsSubAdd)
        std::swap(Even, Odd);

      SmallVector<uint32_t, 32> Idxs(NumElts);
      for (int i = 0; i != NumElts; ++i)
        Idxs[i] = i + (i % 2) * NumElts;

      Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
    }

    Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
                      IsMask3 ? CI->getArgOperand(2) :
                                CI->getArgOperand(0);

    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
  } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") ||
                       Name.startswith("avx512.maskz.pternlog."))) {
    bool ZeroMask = Name[11] == 'z';
    unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits();
    unsigned EltWidth = CI->getType()->getScalarSizeInBits();
    Intrinsic::ID IID;
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pternlog_d_128;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pternlog_d_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pternlog_d_512;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pternlog_q_128;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pternlog_q_256;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pternlog_q_512;
    else
      llvm_unreachable("Unexpected intrinsic");

    Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                      CI->getArgOperand(2), CI->getArgOperand(3) };
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID),
                             Args);
    Value *PassThru = ZeroMask ?
ConstantAggregateZero::get(CI->getType()) 3178 : CI->getArgOperand(0); 3179 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru); 3180 } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") || 3181 Name.startswith("avx512.maskz.vpmadd52"))) { 3182 bool ZeroMask = Name[11] == 'z'; 3183 bool High = Name[20] == 'h' || Name[21] == 'h'; 3184 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3185 Intrinsic::ID IID; 3186 if (VecWidth == 128 && !High) 3187 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128; 3188 else if (VecWidth == 256 && !High) 3189 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256; 3190 else if (VecWidth == 512 && !High) 3191 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512; 3192 else if (VecWidth == 128 && High) 3193 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128; 3194 else if (VecWidth == 256 && High) 3195 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256; 3196 else if (VecWidth == 512 && High) 3197 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512; 3198 else 3199 llvm_unreachable("Unexpected intrinsic"); 3200 3201 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1), 3202 CI->getArgOperand(2) }; 3203 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3204 Args); 3205 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3206 : CI->getArgOperand(0); 3207 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3208 } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") || 3209 Name.startswith("avx512.mask.vpermt2var.") || 3210 Name.startswith("avx512.maskz.vpermt2var."))) { 3211 bool ZeroMask = Name[11] == 'z'; 3212 bool IndexForm = Name[17] == 'i'; 3213 Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm); 3214 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") || 3215 Name.startswith("avx512.maskz.vpdpbusd.") || 3216 Name.startswith("avx512.mask.vpdpbusds.") || 3217 Name.startswith("avx512.maskz.vpdpbusds."))) { 3218 bool ZeroMask = Name[11] == 'z'; 3219 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's'; 3220 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3221 Intrinsic::ID IID; 3222 if (VecWidth == 128 && !IsSaturating) 3223 IID = Intrinsic::x86_avx512_vpdpbusd_128; 3224 else if (VecWidth == 256 && !IsSaturating) 3225 IID = Intrinsic::x86_avx512_vpdpbusd_256; 3226 else if (VecWidth == 512 && !IsSaturating) 3227 IID = Intrinsic::x86_avx512_vpdpbusd_512; 3228 else if (VecWidth == 128 && IsSaturating) 3229 IID = Intrinsic::x86_avx512_vpdpbusds_128; 3230 else if (VecWidth == 256 && IsSaturating) 3231 IID = Intrinsic::x86_avx512_vpdpbusds_256; 3232 else if (VecWidth == 512 && IsSaturating) 3233 IID = Intrinsic::x86_avx512_vpdpbusds_512; 3234 else 3235 llvm_unreachable("Unexpected intrinsic"); 3236 3237 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3238 CI->getArgOperand(2) }; 3239 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3240 Args); 3241 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3242 : CI->getArgOperand(0); 3243 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3244 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") || 3245 Name.startswith("avx512.maskz.vpdpwssd.") || 3246 Name.startswith("avx512.mask.vpdpwssds.") || 3247 Name.startswith("avx512.maskz.vpdpwssds."))) { 3248 bool ZeroMask = Name[11] == 'z'; 3249 bool IsSaturating = Name[ZeroMask ? 
21 : 20] == 's'; 3250 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3251 Intrinsic::ID IID; 3252 if (VecWidth == 128 && !IsSaturating) 3253 IID = Intrinsic::x86_avx512_vpdpwssd_128; 3254 else if (VecWidth == 256 && !IsSaturating) 3255 IID = Intrinsic::x86_avx512_vpdpwssd_256; 3256 else if (VecWidth == 512 && !IsSaturating) 3257 IID = Intrinsic::x86_avx512_vpdpwssd_512; 3258 else if (VecWidth == 128 && IsSaturating) 3259 IID = Intrinsic::x86_avx512_vpdpwssds_128; 3260 else if (VecWidth == 256 && IsSaturating) 3261 IID = Intrinsic::x86_avx512_vpdpwssds_256; 3262 else if (VecWidth == 512 && IsSaturating) 3263 IID = Intrinsic::x86_avx512_vpdpwssds_512; 3264 else 3265 llvm_unreachable("Unexpected intrinsic"); 3266 3267 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3268 CI->getArgOperand(2) }; 3269 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3270 Args); 3271 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3272 : CI->getArgOperand(0); 3273 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3274 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" || 3275 Name == "addcarry.u32" || Name == "addcarry.u64" || 3276 Name == "subborrow.u32" || Name == "subborrow.u64")) { 3277 Intrinsic::ID IID; 3278 if (Name[0] == 'a' && Name.back() == '2') 3279 IID = Intrinsic::x86_addcarry_32; 3280 else if (Name[0] == 'a' && Name.back() == '4') 3281 IID = Intrinsic::x86_addcarry_64; 3282 else if (Name[0] == 's' && Name.back() == '2') 3283 IID = Intrinsic::x86_subborrow_32; 3284 else if (Name[0] == 's' && Name.back() == '4') 3285 IID = Intrinsic::x86_subborrow_64; 3286 else 3287 llvm_unreachable("Unexpected intrinsic"); 3288 3289 // Make a call with 3 operands. 3290 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3291 CI->getArgOperand(2)}; 3292 Value *NewCall = Builder.CreateCall( 3293 Intrinsic::getDeclaration(CI->getModule(), IID), 3294 Args); 3295 3296 // Extract the second result and store it. 3297 Value *Data = Builder.CreateExtractValue(NewCall, 1); 3298 // Cast the pointer to the right type. 3299 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3), 3300 llvm::PointerType::getUnqual(Data->getType())); 3301 Builder.CreateAlignedStore(Data, Ptr, 1); 3302 // Replace the original call result with the first result of the new call. 3303 Value *CF = Builder.CreateExtractValue(NewCall, 0); 3304 3305 CI->replaceAllUsesWith(CF); 3306 Rep = nullptr; 3307 } else if (IsX86 && Name.startswith("avx512.mask.") && 3308 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) { 3309 // Rep will be updated by the call in the condition. 3310 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) { 3311 Value *Arg = CI->getArgOperand(0); 3312 Value *Neg = Builder.CreateNeg(Arg, "neg"); 3313 Value *Cmp = Builder.CreateICmpSGE( 3314 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond"); 3315 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs"); 3316 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" || 3317 Name == "max.ui" || Name == "max.ull")) { 3318 Value *Arg0 = CI->getArgOperand(0); 3319 Value *Arg1 = CI->getArgOperand(1); 3320 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 3321 ? 
Builder.CreateICmpUGE(Arg0, Arg1, "max.cond")
                     : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond");
    Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max");
  } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" ||
                        Name == "min.ui" || Name == "min.ull")) {
    Value *Arg0 = CI->getArgOperand(0);
    Value *Arg1 = CI->getArgOperand(1);
    Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull")
                     ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond")
                     : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond");
    Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min");
  } else if (IsNVVM && Name == "clz.ll") {
    // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64.
    Value *Arg = CI->getArgOperand(0);
    Value *Ctlz = Builder.CreateCall(
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                  {Arg->getType()}),
        {Arg, Builder.getFalse()}, "ctlz");
    Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc");
  } else if (IsNVVM && Name == "popc.ll") {
    // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an i64.
    Value *Arg = CI->getArgOperand(0);
    Value *Popc = Builder.CreateCall(
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                  {Arg->getType()}),
        Arg, "ctpop");
    Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc");
  } else if (IsNVVM && Name == "h2f") {
    Rep = Builder.CreateCall(Intrinsic::getDeclaration(
                                 F->getParent(), Intrinsic::convert_from_fp16,
                                 {Builder.getFloatTy()}),
                             CI->getArgOperand(0), "h2f");
  } else {
    llvm_unreachable("Unknown function for CallInst upgrade.");
  }

  if (Rep)
    CI->replaceAllUsesWith(Rep);
  CI->eraseFromParent();
  return;
  }

  const auto &DefaultCase = [&NewFn, &CI]() -> void {
    // Handle generic mangling change, but nothing else
    assert(
        (CI->getCalledFunction()->getName() != NewFn->getName()) &&
        "Unknown function for CallInst upgrade and isn't just a name change");
    CI->setCalledFunction(NewFn);
  };
  CallInst *NewCall = nullptr;
  switch (NewFn->getIntrinsicID()) {
  default: {
    DefaultCase();
    return;
  }

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    SmallVector<Value *, 4> Args(CI->arg_operands().begin(),
                                 CI->arg_operands().end());
    NewCall = Builder.CreateCall(NewFn, Args);
    break;
  }

  case Intrinsic::bitreverse:
    NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)});
    break;

  case Intrinsic::ctlz:
  case Intrinsic::cttz:
    assert(CI->getNumArgOperands() == 1 &&
           "Mismatch between function args and call args");
    NewCall =
        Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()});
    break;

  case Intrinsic::objectsize: {
    Value *NullIsUnknownSize = CI->getNumArgOperands() == 2
                                   ?
Builder.getFalse() 3413 : CI->getArgOperand(2); 3414 NewCall = Builder.CreateCall( 3415 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize}); 3416 break; 3417 } 3418 3419 case Intrinsic::ctpop: 3420 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3421 break; 3422 3423 case Intrinsic::convert_from_fp16: 3424 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3425 break; 3426 3427 case Intrinsic::dbg_value: 3428 // Upgrade from the old version that had an extra offset argument. 3429 assert(CI->getNumArgOperands() == 4); 3430 // Drop nonzero offsets instead of attempting to upgrade them. 3431 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1))) 3432 if (Offset->isZeroValue()) { 3433 NewCall = Builder.CreateCall( 3434 NewFn, 3435 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)}); 3436 break; 3437 } 3438 CI->eraseFromParent(); 3439 return; 3440 3441 case Intrinsic::x86_xop_vfrcz_ss: 3442 case Intrinsic::x86_xop_vfrcz_sd: 3443 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)}); 3444 break; 3445 3446 case Intrinsic::x86_xop_vpermil2pd: 3447 case Intrinsic::x86_xop_vpermil2ps: 3448 case Intrinsic::x86_xop_vpermil2pd_256: 3449 case Intrinsic::x86_xop_vpermil2ps_256: { 3450 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3451 CI->arg_operands().end()); 3452 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType()); 3453 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy); 3454 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy); 3455 NewCall = Builder.CreateCall(NewFn, Args); 3456 break; 3457 } 3458 3459 case Intrinsic::x86_sse41_ptestc: 3460 case Intrinsic::x86_sse41_ptestz: 3461 case Intrinsic::x86_sse41_ptestnzc: { 3462 // The arguments for these intrinsics used to be v4f32, and changed 3463 // to v2i64. This is purely a nop, since those are bitwise intrinsics. 3464 // So, the only thing required is a bitcast for both arguments. 3465 // First, check the arguments have the old type. 3466 Value *Arg0 = CI->getArgOperand(0); 3467 if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4)) 3468 return; 3469 3470 // Old intrinsic, add bitcasts 3471 Value *Arg1 = CI->getArgOperand(1); 3472 3473 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 3474 3475 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast"); 3476 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 3477 3478 NewCall = Builder.CreateCall(NewFn, {BC0, BC1}); 3479 break; 3480 } 3481 3482 case Intrinsic::x86_rdtscp: { 3483 // This used to take 1 arguments. If we have no arguments, it is already 3484 // upgraded. 3485 if (CI->getNumOperands() == 0) 3486 return; 3487 3488 NewCall = Builder.CreateCall(NewFn); 3489 // Extract the second result and store it. 3490 Value *Data = Builder.CreateExtractValue(NewCall, 1); 3491 // Cast the pointer to the right type. 3492 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0), 3493 llvm::PointerType::getUnqual(Data->getType())); 3494 Builder.CreateAlignedStore(Data, Ptr, 1); 3495 // Replace the original call result with the first result of the new call. 
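    // (Sketch, assuming the old form took the aux pointer as its only operand:
    //  '%tsc = call i64 @llvm.x86.rdtscp(i8* %aux)' becomes a no-argument call
    //  returning a struct, whose second member is stored through %aux and
    //  whose first member replaces the old result below.)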
3496 Value *TSC = Builder.CreateExtractValue(NewCall, 0); 3497 3498 std::string Name = CI->getName(); 3499 if (!Name.empty()) { 3500 CI->setName(Name + ".old"); 3501 NewCall->setName(Name); 3502 } 3503 CI->replaceAllUsesWith(TSC); 3504 CI->eraseFromParent(); 3505 return; 3506 } 3507 3508 case Intrinsic::x86_sse41_insertps: 3509 case Intrinsic::x86_sse41_dppd: 3510 case Intrinsic::x86_sse41_dpps: 3511 case Intrinsic::x86_sse41_mpsadbw: 3512 case Intrinsic::x86_avx_dp_ps_256: 3513 case Intrinsic::x86_avx2_mpsadbw: { 3514 // Need to truncate the last argument from i32 to i8 -- this argument models 3515 // an inherently 8-bit immediate operand to these x86 instructions. 3516 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3517 CI->arg_operands().end()); 3518 3519 // Replace the last argument with a trunc. 3520 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc"); 3521 NewCall = Builder.CreateCall(NewFn, Args); 3522 break; 3523 } 3524 3525 case Intrinsic::thread_pointer: { 3526 NewCall = Builder.CreateCall(NewFn, {}); 3527 break; 3528 } 3529 3530 case Intrinsic::invariant_start: 3531 case Intrinsic::invariant_end: 3532 case Intrinsic::masked_load: 3533 case Intrinsic::masked_store: 3534 case Intrinsic::masked_gather: 3535 case Intrinsic::masked_scatter: { 3536 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3537 CI->arg_operands().end()); 3538 NewCall = Builder.CreateCall(NewFn, Args); 3539 break; 3540 } 3541 3542 case Intrinsic::memcpy: 3543 case Intrinsic::memmove: 3544 case Intrinsic::memset: { 3545 // We have to make sure that the call signature is what we're expecting. 3546 // We only want to change the old signatures by removing the alignment arg: 3547 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i32, i1) 3548 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i1) 3549 // @llvm.memset...(i8*, i8, i[32|64], i32, i1) 3550 // -> @llvm.memset...(i8*, i8, i[32|64], i1) 3551 // Note: i8*'s in the above can be any pointer type 3552 if (CI->getNumArgOperands() != 5) { 3553 DefaultCase(); 3554 return; 3555 } 3556 // Remove alignment argument (3), and add alignment attributes to the 3557 // dest/src pointers. 3558 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1), 3559 CI->getArgOperand(2), CI->getArgOperand(4)}; 3560 NewCall = Builder.CreateCall(NewFn, Args); 3561 auto *MemCI = cast<MemIntrinsic>(NewCall); 3562 // All mem intrinsics support dest alignment. 3563 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3)); 3564 MemCI->setDestAlignment(Align->getZExtValue()); 3565 // Memcpy/Memmove also support source alignment. 3566 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI)) 3567 MTI->setSourceAlignment(Align->getZExtValue()); 3568 break; 3569 } 3570 } 3571 assert(NewCall && "Should have either set this variable or returned through " 3572 "the default case"); 3573 std::string Name = CI->getName(); 3574 if (!Name.empty()) { 3575 CI->setName(Name + ".old"); 3576 NewCall->setName(Name); 3577 } 3578 CI->replaceAllUsesWith(NewCall); 3579 CI->eraseFromParent(); 3580 } 3581 3582 void llvm::UpgradeCallsToIntrinsic(Function *F) { 3583 assert(F && "Illegal attempt to upgrade a non-existent intrinsic."); 3584 3585 // Check if this function should be upgraded and get the replacement function 3586 // if there is one. 3587 Function *NewFn; 3588 if (UpgradeIntrinsicFunction(F, NewFn)) { 3589 // Replace all users of the old function with the new function or new 3590 // instructions. This is not a range loop because the call is deleted. 
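    // (UI is advanced before UpgradeIntrinsicCall runs, so erasing the handled
    // call cannot invalidate the user iterator.)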
3591 for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; ) 3592 if (CallInst *CI = dyn_cast<CallInst>(*UI++)) 3593 UpgradeIntrinsicCall(CI, NewFn); 3594 3595 // Remove old function, no longer used, from the module. 3596 F->eraseFromParent(); 3597 } 3598 } 3599 3600 MDNode *llvm::UpgradeTBAANode(MDNode &MD) { 3601 // Check if the tag uses struct-path aware TBAA format. 3602 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3) 3603 return &MD; 3604 3605 auto &Context = MD.getContext(); 3606 if (MD.getNumOperands() == 3) { 3607 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)}; 3608 MDNode *ScalarType = MDNode::get(Context, Elts); 3609 // Create a MDNode <ScalarType, ScalarType, offset 0, const> 3610 Metadata *Elts2[] = {ScalarType, ScalarType, 3611 ConstantAsMetadata::get( 3612 Constant::getNullValue(Type::getInt64Ty(Context))), 3613 MD.getOperand(2)}; 3614 return MDNode::get(Context, Elts2); 3615 } 3616 // Create a MDNode <MD, MD, offset 0> 3617 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue( 3618 Type::getInt64Ty(Context)))}; 3619 return MDNode::get(Context, Elts); 3620 } 3621 3622 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy, 3623 Instruction *&Temp) { 3624 if (Opc != Instruction::BitCast) 3625 return nullptr; 3626 3627 Temp = nullptr; 3628 Type *SrcTy = V->getType(); 3629 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3630 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3631 LLVMContext &Context = V->getContext(); 3632 3633 // We have no information about target data layout, so we assume that 3634 // the maximum pointer size is 64bit. 3635 Type *MidTy = Type::getInt64Ty(Context); 3636 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy); 3637 3638 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy); 3639 } 3640 3641 return nullptr; 3642 } 3643 3644 Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) { 3645 if (Opc != Instruction::BitCast) 3646 return nullptr; 3647 3648 Type *SrcTy = C->getType(); 3649 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3650 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3651 LLVMContext &Context = C->getContext(); 3652 3653 // We have no information about target data layout, so we assume that 3654 // the maximum pointer size is 64bit. 3655 Type *MidTy = Type::getInt64Ty(Context); 3656 3657 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy), 3658 DestTy); 3659 } 3660 3661 return nullptr; 3662 } 3663 3664 /// Check the debug info version number, if it is out-dated, drop the debug 3665 /// info. Return true if module is modified. 3666 bool llvm::UpgradeDebugInfo(Module &M) { 3667 unsigned Version = getDebugMetadataVersionFromModule(M); 3668 if (Version == DEBUG_METADATA_VERSION) { 3669 bool BrokenDebugInfo = false; 3670 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo)) 3671 report_fatal_error("Broken module found, compilation aborted!"); 3672 if (!BrokenDebugInfo) 3673 // Everything is ok. 3674 return false; 3675 else { 3676 // Diagnose malformed debug info. 3677 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M); 3678 M.getContext().diagnose(Diag); 3679 } 3680 } 3681 bool Modified = StripDebugInfo(M); 3682 if (Modified && Version != DEBUG_METADATA_VERSION) { 3683 // Diagnose a version mismatch. 
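    // (Only reached when stale debug info was actually stripped above; modules
    // that carried no debug info at all report nothing.)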
DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
    M.getContext().diagnose(DiagVersion);
  }
  return Modified;
}

bool llvm::UpgradeRetainReleaseMarker(Module &M) {
  bool Changed = false;
  NamedMDNode *ModRetainReleaseMarker =
      M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker");
  if (ModRetainReleaseMarker) {
    MDNode *Op = ModRetainReleaseMarker->getOperand(0);
    if (Op) {
      MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
      if (ID) {
        SmallVector<StringRef, 4> ValueComp;
        ID->getString().split(ValueComp, "#");
        if (ValueComp.size() == 2) {
          std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
          Metadata *Ops[1] = {MDString::get(M.getContext(), NewValue)};
          ModRetainReleaseMarker->setOperand(0,
                                             MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }
  return Changed;
}

bool llvm::UpgradeModuleFlags(Module &M) {
  NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
  if (!ModFlags)
    return false;

  bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
  for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
    MDNode *Op = ModFlags->getOperand(I);
    if (Op->getNumOperands() != 3)
      continue;
    MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
    if (!ID)
      continue;
    if (ID->getString() == "Objective-C Image Info Version")
      HasObjCFlag = true;
    if (ID->getString() == "Objective-C Class Properties")
      HasClassProperties = true;
    // Upgrade PIC/PIE Module Flags. The module flag behavior for these two
    // flags was Error and is now Max.
    if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
      if (auto *Behavior =
              mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
        if (Behavior->getLimitedValue() == Module::Error) {
          Type *Int32Ty = Type::getInt32Ty(M.getContext());
          Metadata *Ops[3] = {
              ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
              MDString::get(M.getContext(), ID->getString()),
              Op->getOperand(2)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
    // Upgrade the Objective-C Image Info Section. Remove the whitespace in the
    // section name so that llvm-lto will not complain about mismatching
    // module flags that are functionally the same.
    if (ID->getString() == "Objective-C Image Info Section") {
      if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
        SmallVector<StringRef, 4> ValueComp;
        Value->getString().split(ValueComp, " ");
        if (ValueComp.size() != 1) {
          std::string NewValue;
          for (auto &S : ValueComp)
            NewValue += S.str();
          Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
                              MDString::get(M.getContext(), NewValue)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }

  // "Objective-C Class Properties" is a recent addition for Objective-C. We
  // upgrade ObjC bitcodes to contain an "Objective-C Class Properties" module
  // flag of value 0, so we can correctly downgrade this flag when trying to
  // link an ObjC bitcode without this module flag with an ObjC bitcode with
  // this module flag.
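  // (Sketch of the flag added below, assuming the usual textual IR encoding:
  //  !{i32 4, !"Objective-C Class Properties", i32 0}, where i32 4 is the
  //  Override behavior.)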
3772 if (HasObjCFlag && !HasClassProperties) { 3773 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties", 3774 (uint32_t)0); 3775 Changed = true; 3776 } 3777 3778 return Changed; 3779 } 3780 3781 void llvm::UpgradeSectionAttributes(Module &M) { 3782 auto TrimSpaces = [](StringRef Section) -> std::string { 3783 SmallVector<StringRef, 5> Components; 3784 Section.split(Components, ','); 3785 3786 SmallString<32> Buffer; 3787 raw_svector_ostream OS(Buffer); 3788 3789 for (auto Component : Components) 3790 OS << ',' << Component.trim(); 3791 3792 return OS.str().substr(1); 3793 }; 3794 3795 for (auto &GV : M.globals()) { 3796 if (!GV.hasSection()) 3797 continue; 3798 3799 StringRef Section = GV.getSection(); 3800 3801 if (!Section.startswith("__DATA, __objc_catlist")) 3802 continue; 3803 3804 // __DATA, __objc_catlist, regular, no_dead_strip 3805 // __DATA,__objc_catlist,regular,no_dead_strip 3806 GV.setSection(TrimSpaces(Section)); 3807 } 3808 } 3809 3810 static bool isOldLoopArgument(Metadata *MD) { 3811 auto *T = dyn_cast_or_null<MDTuple>(MD); 3812 if (!T) 3813 return false; 3814 if (T->getNumOperands() < 1) 3815 return false; 3816 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0)); 3817 if (!S) 3818 return false; 3819 return S->getString().startswith("llvm.vectorizer."); 3820 } 3821 3822 static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) { 3823 StringRef OldPrefix = "llvm.vectorizer."; 3824 assert(OldTag.startswith(OldPrefix) && "Expected old prefix"); 3825 3826 if (OldTag == "llvm.vectorizer.unroll") 3827 return MDString::get(C, "llvm.loop.interleave.count"); 3828 3829 return MDString::get( 3830 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size())) 3831 .str()); 3832 } 3833 3834 static Metadata *upgradeLoopArgument(Metadata *MD) { 3835 auto *T = dyn_cast_or_null<MDTuple>(MD); 3836 if (!T) 3837 return MD; 3838 if (T->getNumOperands() < 1) 3839 return MD; 3840 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0)); 3841 if (!OldTag) 3842 return MD; 3843 if (!OldTag->getString().startswith("llvm.vectorizer.")) 3844 return MD; 3845 3846 // This has an old tag. Upgrade it. 3847 SmallVector<Metadata *, 8> Ops; 3848 Ops.reserve(T->getNumOperands()); 3849 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString())); 3850 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I) 3851 Ops.push_back(T->getOperand(I)); 3852 3853 return MDTuple::get(T->getContext(), Ops); 3854 } 3855 3856 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) { 3857 auto *T = dyn_cast<MDTuple>(&N); 3858 if (!T) 3859 return &N; 3860 3861 if (none_of(T->operands(), isOldLoopArgument)) 3862 return &N; 3863 3864 SmallVector<Metadata *, 8> Ops; 3865 Ops.reserve(T->getNumOperands()); 3866 for (Metadata *MD : T->operands()) 3867 Ops.push_back(upgradeLoopArgument(MD)); 3868 3869 return MDTuple::get(T->getContext(), Ops); 3870 } 3871
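// (Worked example for the loop-metadata upgrade above, with hypothetical input
// nodes: !{!"llvm.vectorizer.width", i32 8} is rewritten to
// !{!"llvm.loop.vectorize.width", i32 8}, while !"llvm.vectorizer.unroll" maps
// to !"llvm.loop.interleave.count".)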