//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>
using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old, replace it with the new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsic matches below should be marked with which llvm
  // version started autoupgrading them. At some point in the future we would
  // like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
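  // Note: by the time this helper is called, UpgradeX86IntrinsicFunction has
  // already stripped the "x86." prefix, so the names below are matched
  // without it.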
  if (Name == "addcarryx.u32" || // Added in 8.0
      Name == "addcarryx.u64" || // Added in 8.0
      Name == "addcarry.u32" || // Added in 8.0
      Name == "addcarry.u64" || // Added in 8.0
      Name == "subborrow.u32" || // Added in 8.0
      Name == "subborrow.u64" || // Added in 8.0
      Name.startswith("sse2.padds.") || // Added in 8.0
      Name.startswith("sse2.psubs.") || // Added in 8.0
      Name.startswith("sse2.paddus.") || // Added in 8.0
      Name.startswith("sse2.psubus.") || // Added in 8.0
      Name.startswith("avx2.padds.") || // Added in 8.0
      Name.startswith("avx2.psubs.") || // Added in 8.0
      Name.startswith("avx2.paddus.") || // Added in 8.0
      Name.startswith("avx2.psubus.") || // Added in 8.0
      Name.startswith("avx512.padds.") || // Added in 8.0
      Name.startswith("avx512.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.padds.") || // Added in 8.0
      Name.startswith("avx512.mask.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.paddus.") || // Added in 8.0
      Name.startswith("avx512.mask.psubus.") || // Added in 8.0
      Name == "ssse3.pabs.b.128" || // Added in 6.0
      Name == "ssse3.pabs.w.128" || // Added in 6.0
      Name == "ssse3.pabs.d.128" || // Added in 6.0
      Name.startswith("fma4.vfmadd.s") || // Added in 7.0
      Name.startswith("fma.vfmadd.") || // Added in 7.0
      Name.startswith("fma.vfmsub.") || // Added in 7.0
      Name.startswith("fma.vfmsubadd.") || // Added in 7.0
      Name.startswith("fma.vfnmadd.") || // Added in 7.0
      Name.startswith("fma.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
      Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
      Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
      Name.startswith("avx512.kunpck") || // Added in 6.0
      Name.startswith("avx2.pabs.") || // Added in 6.0
      Name.startswith("avx512.mask.pabs.") || // Added in 6.0
      Name.startswith("avx512.broadcastm") || // Added in 6.0
      Name == "sse.sqrt.ss" || // Added in 7.0
      Name == "sse2.sqrt.sd" || // Added in 7.0
      Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
      Name.startswith("avx.sqrt.p") || // Added in 7.0
      Name.startswith("sse2.sqrt.p") || // Added in 7.0
      Name.startswith("sse.sqrt.p") || // Added in 7.0
      Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
      Name.startswith("sse2.pcmpeq.") || // Added in 3.1
      Name.startswith("sse2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx2.pcmpeq.") || // Added in 3.1
      Name.startswith("avx2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
      Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
      Name.startswith("avx.vperm2f128.") || // Added in 6.0
      Name == "avx2.vperm2i128" || // Added in 6.0
      Name == "sse.add.ss" || // Added in 4.0
      Name == "sse2.add.sd" || // Added in 4.0
      Name == "sse.sub.ss" || // Added in 4.0
      Name == "sse2.sub.sd" || // Added in 4.0
      Name == "sse.mul.ss" || // Added in 4.0
      Name == "sse2.mul.sd" || // Added in 4.0
      Name == "sse.div.ss" || // Added in 4.0
      Name == "sse2.div.sd" || // Added in 4.0
      Name == "sse41.pmaxsb" || // Added in 3.9
      Name == "sse2.pmaxs.w" || // Added in 3.9
      Name == "sse41.pmaxsd" || // Added in 3.9
      Name == "sse2.pmaxu.b" || // Added in 3.9
      Name == "sse41.pmaxuw" || // Added in 3.9
      Name == "sse41.pmaxud" || // Added in 3.9
      Name == "sse41.pminsb" || // Added in 3.9
      Name == "sse2.pmins.w" || // Added in 3.9
      Name == "sse41.pminsd" || // Added in 3.9
      Name == "sse2.pminu.b" || // Added in 3.9
      Name == "sse41.pminuw" || // Added in 3.9
      Name == "sse41.pminud" || // Added in 3.9
      Name == "avx512.kand.w" || // Added in 7.0
      Name == "avx512.kandn.w" || // Added in 7.0
      Name == "avx512.knot.w" || // Added in 7.0
      Name == "avx512.kor.w" || // Added in 7.0
      Name == "avx512.kxor.w" || // Added in 7.0
      Name == "avx512.kxnor.w" || // Added in 7.0
      Name == "avx512.kortestc.w" || // Added in 7.0
      Name == "avx512.kortestz.w" || // Added in 7.0
      Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
      Name.startswith("avx2.pmax") || // Added in 3.9
      Name.startswith("avx2.pmin") || // Added in 3.9
      Name.startswith("avx512.mask.pmax") || // Added in 4.0
      Name.startswith("avx512.mask.pmin") || // Added in 4.0
      Name.startswith("avx2.vbroadcast") || // Added in 3.8
      Name.startswith("avx2.pbroadcast") || // Added in 3.8
      Name.startswith("avx.vpermil.") || // Added in 3.1
      Name.startswith("sse2.pshuf") || // Added in 3.9
      Name.startswith("avx512.pbroadcast") || // Added in 3.9
      Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
      Name.startswith("avx512.mask.movddup") || // Added in 3.9
      Name.startswith("avx512.mask.movshdup") || // Added in 3.9
      Name.startswith("avx512.mask.movsldup") || // Added in 3.9
      Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
      Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
      Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
      Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
      Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
      Name.startswith("avx512.mask.punpckl") || // Added in 3.9
      Name.startswith("avx512.mask.punpckh") || // Added in 3.9
      Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
      Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
      Name.startswith("avx512.mask.pand.") || // Added in 3.9
      Name.startswith("avx512.mask.pandn.") || // Added in 3.9
      Name.startswith("avx512.mask.por.") || // Added in 3.9
      Name.startswith("avx512.mask.pxor.") || // Added in 3.9
      Name.startswith("avx512.mask.and.") || // Added in 3.9
      Name.startswith("avx512.mask.andn.") || // Added in 3.9
      Name.startswith("avx512.mask.or.") || // Added in 3.9
      Name.startswith("avx512.mask.xor.") || // Added in 3.9
      Name.startswith("avx512.mask.padd.") || // Added in 4.0
      Name.startswith("avx512.mask.psub.") || // Added in 4.0
      Name.startswith("avx512.mask.pmull.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtudq2ps.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0 updated 9.0
      Name == "avx512.mask.vcvtph2ps.128" || // Added in 11.0
      Name == "avx512.mask.vcvtph2ps.256" || // Added in 11.0
      Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
      Name == "avx512.cvtusi2sd" || // Added in 7.0
      Name.startswith("avx512.mask.permvar.") || // Added in 7.0
      Name == "sse2.pmulu.dq" || // Added in 7.0
      Name == "sse41.pmuldq" || // Added in 7.0
      Name == "avx2.pmulu.dq" || // Added in 7.0
      Name == "avx2.pmul.dq" || // Added in 7.0
      Name == "avx512.pmulu.dq.512" || // Added in 7.0
      Name == "avx512.pmul.dq.512" || // Added in 7.0
      Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
      Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
      Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.p") || // Added in 7.0
      Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
      Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
      Name.startswith("avx512.mask.psll.d") || // Added in 4.0
      Name.startswith("avx512.mask.psll.q") || // Added in 4.0
      Name.startswith("avx512.mask.psll.w") || // Added in 4.0
      Name.startswith("avx512.mask.psra.d") || // Added in 4.0
      Name.startswith("avx512.mask.psra.q") || // Added in 4.0
      Name.startswith("avx512.mask.psra.w") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
      Name.startswith("avx512.mask.pslli") || // Added in 4.0
      Name.startswith("avx512.mask.psrai") || // Added in 4.0
      Name.startswith("avx512.mask.psrli") || // Added in 4.0
      Name.startswith("avx512.mask.psllv") || // Added in 4.0
      Name.startswith("avx512.mask.psrav") || // Added in 4.0
      Name.startswith("avx512.mask.psrlv") || // Added in 4.0
      Name.startswith("sse41.pmovsx") || // Added in 3.8
      Name.startswith("sse41.pmovzx") || // Added in 3.9
      Name.startswith("avx2.pmovsx") || // Added in 3.9
      Name.startswith("avx2.pmovzx") || // Added in 3.9
      Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
      Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
      Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
      Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
      Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
      Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
      Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
      Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
      Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0
      Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0
      Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0
      Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0
      Name.startswith("avx512.vpshld.") || // Added in 8.0
      Name.startswith("avx512.vpshrd.") || // Added in 8.0
      Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
      Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
      Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
      Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
      Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
      Name.startswith("avx512.mask.conflict.") || // Added in 9.0
      Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
      Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
      Name == "sse.cvtsi2ss" || // Added in 7.0
      Name == "sse.cvtsi642ss" || // Added in 7.0
      Name == "sse2.cvtsi2sd" || // Added in 7.0
      Name == "sse2.cvtsi642sd" || // Added in 7.0
      Name == "sse2.cvtss2sd" || // Added in 7.0
      Name == "sse2.cvtdq2pd" || // Added in 3.9
      Name == "sse2.cvtdq2ps" || // Added in 7.0
      Name == "sse2.cvtps2pd" || // Added in 3.9
      Name == "avx.cvtdq2.pd.256" || // Added in 3.9
      Name == "avx.cvtdq2.ps.256" || // Added in 7.0
      Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
      Name.startswith("vcvtph2ps.") || // Added in 11.0
      Name.startswith("avx.vinsertf128.") || // Added in 3.7
      Name == "avx2.vinserti128" || // Added in 3.7
      Name.startswith("avx512.mask.insert") || // Added in 4.0
      Name.startswith("avx.vextractf128.") || // Added in 3.7
      Name == "avx2.vextracti128" || // Added in 3.7
      Name.startswith("avx512.mask.vextract") || // Added in 4.0
      Name.startswith("sse4a.movnt.") || // Added in 3.9
      Name.startswith("avx.movnt.") || // Added in 3.2
      Name.startswith("avx512.storent.") || // Added in 3.9
      Name == "sse41.movntdqa" || // Added in 5.0
      Name == "avx2.movntdqa" || // Added in 5.0
      Name == "avx512.movntdqa" || // Added in 5.0
      Name == "sse2.storel.dq" || // Added in 3.9
      Name.startswith("sse.storeu.") || // Added in 3.9
      Name.startswith("sse2.storeu.") || // Added in 3.9
      Name.startswith("avx.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.store.p") || // Added in 3.9
      Name.startswith("avx512.mask.store.b.") || // Added in 3.9
      Name.startswith("avx512.mask.store.w.") || // Added in 3.9
      Name.startswith("avx512.mask.store.d.") || // Added in 3.9
      Name.startswith("avx512.mask.store.q.") || // Added in 3.9
      Name == "avx512.mask.store.ss" || // Added in 7.0
      Name.startswith("avx512.mask.loadu.") || // Added in 3.9
      Name.startswith("avx512.mask.load.") || // Added in 3.9
      Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
      Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
      Name.startswith("avx512.mask.expand.b") || // Added in 9.0
      Name.startswith("avx512.mask.expand.w") || // Added in 9.0
      Name.startswith("avx512.mask.expand.d") || // Added in 9.0
      Name.startswith("avx512.mask.expand.q") || // Added in 9.0
      Name.startswith("avx512.mask.expand.p") || // Added in 9.0
      Name.startswith("avx512.mask.compress.b") || // Added in 9.0
      Name.startswith("avx512.mask.compress.w") || // Added in 9.0
      Name.startswith("avx512.mask.compress.d") || // Added in 9.0
      Name.startswith("avx512.mask.compress.q") || // Added in 9.0
      Name.startswith("avx512.mask.compress.p") || // Added in 9.0
      Name == "sse42.crc32.64.8" || // Added in 3.4
      Name.startswith("avx.vbroadcast.s") || // Added in 3.5
      Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
      Name.startswith("avx512.mask.palignr.") || // Added in 3.9
      Name.startswith("avx512.mask.valign.") || // Added in 4.0
      Name.startswith("sse2.psll.dq") || // Added in 3.7
      Name.startswith("sse2.psrl.dq") || // Added in 3.7
      Name.startswith("avx2.psll.dq") || // Added in 3.7
      Name.startswith("avx2.psrl.dq") || // Added in 3.7
      Name.startswith("avx512.psll.dq") || // Added in 3.9
      Name.startswith("avx512.psrl.dq") || // Added in 3.9
      Name == "sse41.pblendw" || // Added in 3.7
      Name.startswith("sse41.blendp") || // Added in 3.7
      Name.startswith("avx.blend.p") || // Added in 3.7
      Name == "avx2.pblendw" || // Added in 3.7
      Name.startswith("avx2.pblendd.") || // Added in 3.7
      Name.startswith("avx.vbroadcastf128") || // Added in 4.0
      Name == "avx2.vbroadcasti128" || // Added in 3.7
      Name.startswith("avx512.mask.broadcastf") || // Added in 6.0
      Name.startswith("avx512.mask.broadcasti") || // Added in 6.0
      Name == "xop.vpcmov" || // Added in 3.8
      Name == "xop.vpcmov.256" || // Added in 5.0
      Name.startswith("avx512.mask.move.s") || // Added in 4.0
      Name.startswith("avx512.cvtmask2") || // Added in 5.0
      Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0
      Name.startswith("xop.vprot") || // Added in 8.0
      Name.startswith("avx512.prol") || // Added in 8.0
      Name.startswith("avx512.pror") || // Added in 8.0
      Name.startswith("avx512.mask.prorv.") || // Added in 8.0
      Name.startswith("avx512.mask.pror.") || // Added in 8.0
      Name.startswith("avx512.mask.prolv.") || // Added in 8.0
      Name.startswith("avx512.mask.prol.") || // Added in 8.0
      Name.startswith("avx512.ptestm") || // Added in 6.0
      Name.startswith("avx512.ptestnm") || // Added in 6.0
      Name.startswith("avx512.mask.pavg")) // Added in 6.0
    return true;

  return false;
}

static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
                                        Function *&NewFn) {
  // Only handle intrinsics that start with "x86.".
  if (!Name.startswith("x86."))
    return false;
  // Remove "x86." prefix.
  Name = Name.substr(4);

  if (ShouldUpgradeX86Intrinsic(F, Name)) {
    NewFn = nullptr;
    return true;
  }

  if (Name == "rdtscp") { // Added in 8.0
    // If this intrinsic has 0 operands, it's the new version.
    if (F->getFunctionType()->getNumParams() == 0)
      return false;

    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_rdtscp);
    return true;
  }

  // SSE4.1 ptest functions may have an old signature.
  if (Name.startswith("sse41.ptest")) { // Added in 3.2
    if (Name.substr(11) == "c")
      return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
    if (Name.substr(11) == "z")
      return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
    if (Name.substr(11) == "nzc")
      return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
  }
  // Several blend and other instructions with masks used the wrong number of
  // bits.
  if (Name == "sse41.insertps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
                                            NewFn);
  if (Name == "sse41.dppd") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
                                            NewFn);
  if (Name == "sse41.dpps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
                                            NewFn);
  if (Name == "sse41.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
                                            NewFn);
  if (Name == "avx.dp.ps.256") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
                                            NewFn);
  if (Name == "avx2.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
                                            NewFn);

  // frcz.ss/sd may need to have an argument dropped. Added in 3.2
  if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_ss);
    return true;
  }
  if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_sd);
    return true;
  }
  // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
  if (Name.startswith("xop.vpermil2")) { // Added in 3.9
    auto Idx = F->getFunctionType()->getParamType(2);
    if (Idx->isFPOrFPVectorTy()) {
      rename(F);
      unsigned IdxSize = Idx->getPrimitiveSizeInBits();
      unsigned EltSize = Idx->getScalarSizeInBits();
      Intrinsic::ID Permil2ID;
      if (EltSize == 64 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd;
      else if (EltSize == 32 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2ps;
      else if (EltSize == 64 && IdxSize == 256)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
      else
        Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
      NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
      return true;
    }
  }

  if (Name == "seh.recoverfp") {
    NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
    return true;
  }

  return false;
}

static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vclz")) {
      Type *args[2] = {
        F->arg_begin()->getType(),
        Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType *fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    static const Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType *fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm." + Name + ".p0i8", F->getParent());
      return true;
    }
    static const Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vstRegex.match(Name)) {
      static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
                                                Intrinsic::arm_neon_vst2,
                                                Intrinsic::arm_neon_vst3,
                                                Intrinsic::arm_neon_vst4};

      static const Intrinsic::ID StoreLaneInts[] = {
        Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
        Intrinsic::arm_neon_vst4lane
      };

      auto fArgs = F->getFunctionType()->params();
      Type *Tys[] = {fArgs[0], fArgs[1]};
      if (Name.find("lane") == StringRef::npos)
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreInts[fArgs.size() - 3], Tys);
      else
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreLaneInts[fArgs.size() - 5], Tys);
      return true;
    }
    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
      return true;
    }
    if (Name.startswith("arm.neon.vqadds.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::sadd_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vqaddu.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::uadd_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vqsubs.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ssub_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vqsubu.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::usub_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("aarch64.neon.addp")) {
      if (F->arg_size() != 2)
        break; // Invalid IR.
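      // Only the floating-point variants are upgraded here; they map onto
      // aarch64.neon.faddp with the same vector type.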
      VectorType *Ty = dyn_cast<VectorType>(F->getReturnType());
      if (Ty && Ty->getElementType()->isFloatingPointTy()) {
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::aarch64_neon_faddp, Ty);
        return true;
      }
    }
    break;
  }

  case 'c': {
    if (Name.startswith("ctlz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("cttz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
                                        F->arg_begin()->getType());
      return true;
    }
    break;
  }
  case 'd': {
    if (Name == "dbg.value" && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
      return true;
    }
    break;
  }
  case 'e': {
    SmallVector<StringRef, 2> Groups;
    static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[fi][0-9]+");
    if (R.match(Name, &Groups)) {
      Intrinsic::ID ID = Intrinsic::not_intrinsic;
      if (Groups[1] == "fadd")
        ID = Intrinsic::experimental_vector_reduce_v2_fadd;
      if (Groups[1] == "fmul")
        ID = Intrinsic::experimental_vector_reduce_v2_fmul;

      if (ID != Intrinsic::not_intrinsic) {
        rename(F);
        auto Args = F->getFunctionType()->params();
        Type *Tys[] = {F->getFunctionType()->getReturnType(), Args[1]};
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
        return true;
      }
    }
    break;
  }
  case 'i':
  case 'l': {
    bool IsLifetimeStart = Name.startswith("lifetime.start");
    if (IsLifetimeStart || Name.startswith("invariant.start")) {
      Intrinsic::ID ID = IsLifetimeStart ?
          Intrinsic::lifetime_start : Intrinsic::invariant_start;
      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[1]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }

    bool IsLifetimeEnd = Name.startswith("lifetime.end");
    if (IsLifetimeEnd || Name.startswith("invariant.end")) {
      Intrinsic::ID ID = IsLifetimeEnd ?
          Intrinsic::lifetime_end : Intrinsic::invariant_end;

      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }
    if (Name.startswith("invariant.group.barrier")) {
      // Rename invariant.group.barrier to launder.invariant.group
      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[0]};
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
          Intrinsic::launder_invariant_group, ObjectPtr);
      return true;
    }

    break;
  }
  case 'm': {
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    // Renaming gather/scatter intrinsics with no address space overloading
    // to the new overload which includes an address space.
    if (Name.startswith("masked.gather.")) {
      Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_gather, Tys);
        return true;
      }
    }
    if (Name.startswith("masked.scatter.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = {Args[0], Args[1]};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_scatter, Tys);
        return true;
      }
    }
    // Updating the memory intrinsics (memcpy/memmove/memset) that have an
    // alignment parameter to embed the alignment as an attribute of
    // the pointer args.
    if (Name.startswith("memcpy.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memmove.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest and len.
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
  case 'n': {
    if (Name.startswith("nvvm.")) {
      Name = Name.substr(5);

      // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
      Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
                              .Cases("brev32", "brev64", Intrinsic::bitreverse)
                              .Case("clz.i", Intrinsic::ctlz)
                              .Case("popc.i", Intrinsic::ctpop)
                              .Default(Intrinsic::not_intrinsic);
      if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                          {F->getReturnType()});
        return true;
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
      // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = StringSwitch<bool>(Name)
                        .Cases("abs.i", "abs.ll", true)
                        .Cases("clz.ll", "popc.ll", "h2f", true)
                        .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
                        .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
                        .StartsWith("atomic.load.add.f32.p", true)
                        .StartsWith("atomic.load.add.f64.p", true)
                        .Default(false);
      if (Expand) {
        NewFn = nullptr;
        return true;
      }
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->arg_size() == 2 || F->arg_size() == 3 ||
          F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
                                          Tys);
        return true;
      }
    }
    break;

  case 'p':
    if (Name == "prefetch") {
      // Handle address space overloading.
      Type *Tys[] = {F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::prefetch, Tys)) {
        rename(F);
        NewFn =
            Intrinsic::getDeclaration(F->getParent(), Intrinsic::prefetch, Tys);
        return true;
      }
    }
    break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x':
    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }
  // Remangle our intrinsic since we upgrade the mangling.
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != None) {
    NewFn = Result.getValue();
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
                          GV->getName() == "llvm.global_dtors")) ||
      !GV->hasInitializer())
    return nullptr;
  ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
  if (!ATy)
    return nullptr;
  StructType *STy = dyn_cast<StructType>(ATy->getElementType());
  if (!STy || STy->getNumElements() != 2)
    return nullptr;

  LLVMContext &C = GV->getContext();
  IRBuilder<> IRB(C);
  auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
                               IRB.getInt8PtrTy());
  Constant *Init = GV->getInitializer();
  unsigned N = Init->getNumOperands();
  std::vector<Constant *> NewCtors(N);
  for (unsigned i = 0; i != N; ++i) {
    auto Ctor = cast<Constant>(Init->getOperand(i));
    NewCtors[i] = ConstantStruct::get(
        EltTy, Ctor->getAggregateElement(0u), Ctor->getAggregateElement(1),
        Constant::getNullValue(IRB.getInt8PtrTy()));
  }
  Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);

  return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
                            NewInit, GV->getName());
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Convert an integer mask argument to a vector of i1 with NumElts elements.
static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
                     cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}

// Emit a vector select between Op0 and Op1 controlled by an integer mask.
static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Emit a select between Op0 and Op1 controlled only by bit 0 of the mask.
static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
                                  Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  llvm::VectorType *MaskTy =
      llvm::VectorType::get(Builder.getInt1Ty(),
                            Mask->getType()->getIntegerBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);
  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the
// immediate so we need to handle both cases. VALIGN also doesn't have
// 128-bit lanes.
static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that.
  for (unsigned l = 0; l < NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}

static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallInst &CI,
                                          bool ZeroMask, bool IndexForm) {
  Type *Ty = CI.getType();
  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
  unsigned EltWidth = Ty->getScalarSizeInBits();
  bool IsFloat = Ty->isFPOrFPVectorTy();
  Intrinsic::ID IID;
  if (VecWidth == 128 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
  else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_128;
  else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
  else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_128;
  else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
  else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_256;
  else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
  else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_256;
  else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
  else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_512;
  else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
  else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_512;
  else if (VecWidth == 128 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
  else if (VecWidth == 256 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
  else if (VecWidth == 512 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
  else if (VecWidth == 128 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
  else if (VecWidth == 256 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
  else if (VecWidth == 512 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
  else
    llvm_unreachable("Unexpected intrinsic");

  Value *Args[] = { CI.getArgOperand(0), CI.getArgOperand(1),
                    CI.getArgOperand(2) };

  // If this isn't index form we need to swap operand 0 and 1.
  if (!IndexForm)
    std::swap(Args[0], Args[1]);

  Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
                                Args);
  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
                             : Builder.CreateBitCast(CI.getArgOperand(1),
                                                     Ty);
  return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
}

static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
                                            bool IsSigned, bool IsAddition) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getOperand(0);
  Value *Op1 = CI.getOperand(1);

  Intrinsic::ID IID =
      IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
               : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
                               bool IsRotateRight) {
  Type *Ty = CI.getType();
  Value *Src = CI.getArgOperand(0);
  Value *Amt = CI.getArgOperand(1);

  // Amount may be scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallInst &CI, unsigned Imm,
                              bool IsSigned) {
  Type *Ty = CI.getType();
  Value *LHS = CI.getArgOperand(0);
  Value *RHS = CI.getArgOperand(1);

  CmpInst::Predicate Pred;
  switch (Imm) {
  case 0x0:
    Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    break;
  case 0x1:
    Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
    break;
  case 0x2:
    Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    break;
  case 0x3:
    Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
    break;
  case 0x4:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case 0x5:
    Pred = ICmpInst::ICMP_NE;
    break;
  case 0x6:
    return Constant::getNullValue(Ty); // FALSE
  case 0x7:
    return Constant::getAllOnesValue(Ty); // TRUE
  default:
    llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
  }

  Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
  Value *Ext = Builder.CreateSExt(Cmp, Ty);
  return Ext;
}

static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
                                    bool IsShiftRight, bool ZeroMask) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Amt = CI.getArgOperand(2);

  if (IsShiftRight)
    std::swap(Op0, Op1);

  // Amount may be scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2 so
  // we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});

  unsigned NumArgs = CI.getNumArgOperands();
  if (NumArgs >= 4) { // For masked intrinsics.
    Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
                    ZeroMask     ? ConstantAggregateZero::get(CI.getType()) :
                                   CI.getArgOperand(0);
    Value *Mask = CI.getOperand(NumArgs - 1);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  const Align Alignment =
      Aligned ? Align(cast<VectorType>(Data->getType())->getBitWidth() / 8)
              : Align(1);

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Alignment);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Alignment, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  Type *ValTy = Passthru->getType();
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
  const Align Alignment =
      Aligned ? Align(cast<VectorType>(Passthru->getType())->getBitWidth() / 8)
              : Align(1);

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(ValTy, Ptr, Alignment);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Alignment, Mask, Passthru);
}

static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  llvm::Type *Ty = Op0->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
  Value *Neg = Builder.CreateNeg(Op0);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);

  if (CI.getNumArgOperands() == 3)
    Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));

  return Res;
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
  Type *Ty = CI.getType();

  // Arguments have a vXi32 type so cast to vXi64.
  Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
  Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = Builder.CreateShl(LHS, ShiftAmt);
    LHS = Builder.CreateAShr(LHS, ShiftAmt);
    RHS = Builder.CreateShl(RHS, ShiftAmt);
    RHS = Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = Builder.CreateAnd(LHS, Mask);
    RHS = Builder.CreateAnd(RHS, Mask);
  }

  Value *Res = Builder.CreateMul(LHS, RHS);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

// Apply a mask to a vector of i1's and make sure the result is at least
// 8 bits wide.
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask) {
  unsigned NumElts = Vec->getType()->getVectorNumElements();
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Vec = Builder.CreateShuffleVector(Vec,
                                      Constant::getNullValue(Vec->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
}

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   unsigned CC, bool Signed) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ; break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE; break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
  }

  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);

  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
                                    Intrinsic::ID IID) {
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
  Value *Rep = Builder.CreateCall(Intrin,
                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}

static Value *upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value *A = CI.getArgOperand(0);
  Value *B = CI.getArgOperand(1);
  Value *Src = CI.getArgOperand(2);
  Value *Mask = CI.getArgOperand(3);

  Value *AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
  Value *Cmp = Builder.CreateIsNotNull(AndNode);
  Value *Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
  Value *Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
  Value *Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}

static Value *UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op = CI.getArgOperand(0);
  Type *ReturnOp = CI.getType();
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
}

// Replace intrinsic with unmasked version and a select.
1419 static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder, 1420 CallInst &CI, Value *&Rep) { 1421 Name = Name.substr(12); // Remove avx512.mask. 1422 1423 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits(); 1424 unsigned EltWidth = CI.getType()->getScalarSizeInBits(); 1425 Intrinsic::ID IID; 1426 if (Name.startswith("max.p")) { 1427 if (VecWidth == 128 && EltWidth == 32) 1428 IID = Intrinsic::x86_sse_max_ps; 1429 else if (VecWidth == 128 && EltWidth == 64) 1430 IID = Intrinsic::x86_sse2_max_pd; 1431 else if (VecWidth == 256 && EltWidth == 32) 1432 IID = Intrinsic::x86_avx_max_ps_256; 1433 else if (VecWidth == 256 && EltWidth == 64) 1434 IID = Intrinsic::x86_avx_max_pd_256; 1435 else 1436 llvm_unreachable("Unexpected intrinsic"); 1437 } else if (Name.startswith("min.p")) { 1438 if (VecWidth == 128 && EltWidth == 32) 1439 IID = Intrinsic::x86_sse_min_ps; 1440 else if (VecWidth == 128 && EltWidth == 64) 1441 IID = Intrinsic::x86_sse2_min_pd; 1442 else if (VecWidth == 256 && EltWidth == 32) 1443 IID = Intrinsic::x86_avx_min_ps_256; 1444 else if (VecWidth == 256 && EltWidth == 64) 1445 IID = Intrinsic::x86_avx_min_pd_256; 1446 else 1447 llvm_unreachable("Unexpected intrinsic"); 1448 } else if (Name.startswith("pshuf.b.")) { 1449 if (VecWidth == 128) 1450 IID = Intrinsic::x86_ssse3_pshuf_b_128; 1451 else if (VecWidth == 256) 1452 IID = Intrinsic::x86_avx2_pshuf_b; 1453 else if (VecWidth == 512) 1454 IID = Intrinsic::x86_avx512_pshuf_b_512; 1455 else 1456 llvm_unreachable("Unexpected intrinsic"); 1457 } else if (Name.startswith("pmul.hr.sw.")) { 1458 if (VecWidth == 128) 1459 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128; 1460 else if (VecWidth == 256) 1461 IID = Intrinsic::x86_avx2_pmul_hr_sw; 1462 else if (VecWidth == 512) 1463 IID = Intrinsic::x86_avx512_pmul_hr_sw_512; 1464 else 1465 llvm_unreachable("Unexpected intrinsic"); 1466 } else if (Name.startswith("pmulh.w.")) { 1467 if (VecWidth == 128) 1468 IID = Intrinsic::x86_sse2_pmulh_w; 1469 else if (VecWidth == 256) 1470 IID = Intrinsic::x86_avx2_pmulh_w; 1471 else if (VecWidth == 512) 1472 IID = Intrinsic::x86_avx512_pmulh_w_512; 1473 else 1474 llvm_unreachable("Unexpected intrinsic"); 1475 } else if (Name.startswith("pmulhu.w.")) { 1476 if (VecWidth == 128) 1477 IID = Intrinsic::x86_sse2_pmulhu_w; 1478 else if (VecWidth == 256) 1479 IID = Intrinsic::x86_avx2_pmulhu_w; 1480 else if (VecWidth == 512) 1481 IID = Intrinsic::x86_avx512_pmulhu_w_512; 1482 else 1483 llvm_unreachable("Unexpected intrinsic"); 1484 } else if (Name.startswith("pmaddw.d.")) { 1485 if (VecWidth == 128) 1486 IID = Intrinsic::x86_sse2_pmadd_wd; 1487 else if (VecWidth == 256) 1488 IID = Intrinsic::x86_avx2_pmadd_wd; 1489 else if (VecWidth == 512) 1490 IID = Intrinsic::x86_avx512_pmaddw_d_512; 1491 else 1492 llvm_unreachable("Unexpected intrinsic"); 1493 } else if (Name.startswith("pmaddubs.w.")) { 1494 if (VecWidth == 128) 1495 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128; 1496 else if (VecWidth == 256) 1497 IID = Intrinsic::x86_avx2_pmadd_ub_sw; 1498 else if (VecWidth == 512) 1499 IID = Intrinsic::x86_avx512_pmaddubs_w_512; 1500 else 1501 llvm_unreachable("Unexpected intrinsic"); 1502 } else if (Name.startswith("packsswb.")) { 1503 if (VecWidth == 128) 1504 IID = Intrinsic::x86_sse2_packsswb_128; 1505 else if (VecWidth == 256) 1506 IID = Intrinsic::x86_avx2_packsswb; 1507 else if (VecWidth == 512) 1508 IID = Intrinsic::x86_avx512_packsswb_512; 1509 else 1510 llvm_unreachable("Unexpected intrinsic"); 1511 } else if (Name.startswith("packssdw.")) { 
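    // As with the cases above, the replacement is chosen purely by the
    // operation's vector width.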
1512 if (VecWidth == 128) 1513 IID = Intrinsic::x86_sse2_packssdw_128; 1514 else if (VecWidth == 256) 1515 IID = Intrinsic::x86_avx2_packssdw; 1516 else if (VecWidth == 512) 1517 IID = Intrinsic::x86_avx512_packssdw_512; 1518 else 1519 llvm_unreachable("Unexpected intrinsic"); 1520 } else if (Name.startswith("packuswb.")) { 1521 if (VecWidth == 128) 1522 IID = Intrinsic::x86_sse2_packuswb_128; 1523 else if (VecWidth == 256) 1524 IID = Intrinsic::x86_avx2_packuswb; 1525 else if (VecWidth == 512) 1526 IID = Intrinsic::x86_avx512_packuswb_512; 1527 else 1528 llvm_unreachable("Unexpected intrinsic"); 1529 } else if (Name.startswith("packusdw.")) { 1530 if (VecWidth == 128) 1531 IID = Intrinsic::x86_sse41_packusdw; 1532 else if (VecWidth == 256) 1533 IID = Intrinsic::x86_avx2_packusdw; 1534 else if (VecWidth == 512) 1535 IID = Intrinsic::x86_avx512_packusdw_512; 1536 else 1537 llvm_unreachable("Unexpected intrinsic"); 1538 } else if (Name.startswith("vpermilvar.")) { 1539 if (VecWidth == 128 && EltWidth == 32) 1540 IID = Intrinsic::x86_avx_vpermilvar_ps; 1541 else if (VecWidth == 128 && EltWidth == 64) 1542 IID = Intrinsic::x86_avx_vpermilvar_pd; 1543 else if (VecWidth == 256 && EltWidth == 32) 1544 IID = Intrinsic::x86_avx_vpermilvar_ps_256; 1545 else if (VecWidth == 256 && EltWidth == 64) 1546 IID = Intrinsic::x86_avx_vpermilvar_pd_256; 1547 else if (VecWidth == 512 && EltWidth == 32) 1548 IID = Intrinsic::x86_avx512_vpermilvar_ps_512; 1549 else if (VecWidth == 512 && EltWidth == 64) 1550 IID = Intrinsic::x86_avx512_vpermilvar_pd_512; 1551 else 1552 llvm_unreachable("Unexpected intrinsic"); 1553 } else if (Name == "cvtpd2dq.256") { 1554 IID = Intrinsic::x86_avx_cvt_pd2dq_256; 1555 } else if (Name == "cvtpd2ps.256") { 1556 IID = Intrinsic::x86_avx_cvt_pd2_ps_256; 1557 } else if (Name == "cvttpd2dq.256") { 1558 IID = Intrinsic::x86_avx_cvtt_pd2dq_256; 1559 } else if (Name == "cvttps2dq.128") { 1560 IID = Intrinsic::x86_sse2_cvttps2dq; 1561 } else if (Name == "cvttps2dq.256") { 1562 IID = Intrinsic::x86_avx_cvtt_ps2dq_256; 1563 } else if (Name.startswith("permvar.")) { 1564 bool IsFloat = CI.getType()->isFPOrFPVectorTy(); 1565 if (VecWidth == 256 && EltWidth == 32 && IsFloat) 1566 IID = Intrinsic::x86_avx2_permps; 1567 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat) 1568 IID = Intrinsic::x86_avx2_permd; 1569 else if (VecWidth == 256 && EltWidth == 64 && IsFloat) 1570 IID = Intrinsic::x86_avx512_permvar_df_256; 1571 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat) 1572 IID = Intrinsic::x86_avx512_permvar_di_256; 1573 else if (VecWidth == 512 && EltWidth == 32 && IsFloat) 1574 IID = Intrinsic::x86_avx512_permvar_sf_512; 1575 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat) 1576 IID = Intrinsic::x86_avx512_permvar_si_512; 1577 else if (VecWidth == 512 && EltWidth == 64 && IsFloat) 1578 IID = Intrinsic::x86_avx512_permvar_df_512; 1579 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat) 1580 IID = Intrinsic::x86_avx512_permvar_di_512; 1581 else if (VecWidth == 128 && EltWidth == 16) 1582 IID = Intrinsic::x86_avx512_permvar_hi_128; 1583 else if (VecWidth == 256 && EltWidth == 16) 1584 IID = Intrinsic::x86_avx512_permvar_hi_256; 1585 else if (VecWidth == 512 && EltWidth == 16) 1586 IID = Intrinsic::x86_avx512_permvar_hi_512; 1587 else if (VecWidth == 128 && EltWidth == 8) 1588 IID = Intrinsic::x86_avx512_permvar_qi_128; 1589 else if (VecWidth == 256 && EltWidth == 8) 1590 IID = Intrinsic::x86_avx512_permvar_qi_256; 1591 else if (VecWidth == 512 && EltWidth == 8) 1592 IID = 
Intrinsic::x86_avx512_permvar_qi_512; 1593 else 1594 llvm_unreachable("Unexpected intrinsic"); 1595 } else if (Name.startswith("dbpsadbw.")) { 1596 if (VecWidth == 128) 1597 IID = Intrinsic::x86_avx512_dbpsadbw_128; 1598 else if (VecWidth == 256) 1599 IID = Intrinsic::x86_avx512_dbpsadbw_256; 1600 else if (VecWidth == 512) 1601 IID = Intrinsic::x86_avx512_dbpsadbw_512; 1602 else 1603 llvm_unreachable("Unexpected intrinsic"); 1604 } else if (Name.startswith("pmultishift.qb.")) { 1605 if (VecWidth == 128) 1606 IID = Intrinsic::x86_avx512_pmultishift_qb_128; 1607 else if (VecWidth == 256) 1608 IID = Intrinsic::x86_avx512_pmultishift_qb_256; 1609 else if (VecWidth == 512) 1610 IID = Intrinsic::x86_avx512_pmultishift_qb_512; 1611 else 1612 llvm_unreachable("Unexpected intrinsic"); 1613 } else if (Name.startswith("conflict.")) { 1614 if (Name[9] == 'd' && VecWidth == 128) 1615 IID = Intrinsic::x86_avx512_conflict_d_128; 1616 else if (Name[9] == 'd' && VecWidth == 256) 1617 IID = Intrinsic::x86_avx512_conflict_d_256; 1618 else if (Name[9] == 'd' && VecWidth == 512) 1619 IID = Intrinsic::x86_avx512_conflict_d_512; 1620 else if (Name[9] == 'q' && VecWidth == 128) 1621 IID = Intrinsic::x86_avx512_conflict_q_128; 1622 else if (Name[9] == 'q' && VecWidth == 256) 1623 IID = Intrinsic::x86_avx512_conflict_q_256; 1624 else if (Name[9] == 'q' && VecWidth == 512) 1625 IID = Intrinsic::x86_avx512_conflict_q_512; 1626 else 1627 llvm_unreachable("Unexpected intrinsic"); 1628 } else if (Name.startswith("pavg.")) { 1629 if (Name[5] == 'b' && VecWidth == 128) 1630 IID = Intrinsic::x86_sse2_pavg_b; 1631 else if (Name[5] == 'b' && VecWidth == 256) 1632 IID = Intrinsic::x86_avx2_pavg_b; 1633 else if (Name[5] == 'b' && VecWidth == 512) 1634 IID = Intrinsic::x86_avx512_pavg_b_512; 1635 else if (Name[5] == 'w' && VecWidth == 128) 1636 IID = Intrinsic::x86_sse2_pavg_w; 1637 else if (Name[5] == 'w' && VecWidth == 256) 1638 IID = Intrinsic::x86_avx2_pavg_w; 1639 else if (Name[5] == 'w' && VecWidth == 512) 1640 IID = Intrinsic::x86_avx512_pavg_w_512; 1641 else 1642 llvm_unreachable("Unexpected intrinsic"); 1643 } else 1644 return false; 1645 1646 SmallVector<Value *, 4> Args(CI.arg_operands().begin(), 1647 CI.arg_operands().end()); 1648 Args.pop_back(); 1649 Args.pop_back(); 1650 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID), 1651 Args); 1652 unsigned NumArgs = CI.getNumArgOperands(); 1653 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep, 1654 CI.getArgOperand(NumArgs - 2)); 1655 return true; 1656 } 1657 1658 /// Upgrade comment in call to inline asm that represents an objc retain release 1659 /// marker. 1660 void llvm::UpgradeInlineAsmString(std::string *AsmStr) { 1661 size_t Pos; 1662 if (AsmStr->find("mov\tfp") == 0 && 1663 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos && 1664 (Pos = AsmStr->find("# marker")) != std::string::npos) { 1665 AsmStr->replace(Pos, 1, ";"); 1666 } 1667 return; 1668 } 1669 1670 /// Upgrade a call to an old intrinsic. All argument and return casting must be 1671 /// provided to seamlessly integrate with existing context. 1672 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { 1673 Function *F = CI->getCalledFunction(); 1674 LLVMContext &C = CI->getContext(); 1675 IRBuilder<> Builder(C); 1676 Builder.SetInsertPoint(CI->getParent(), CI->getIterator()); 1677 1678 assert(F && "Intrinsic call is not direct?"); 1679 1680 if (!NewFn) { 1681 // Get the Function's name. 
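    // Legacy intrinsics are matched on their name with the "llvm." prefix
    // (and the "x86."/"nvvm." target prefix, when present) stripped, so all
    // comparisons below are against the remainder of the name.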
1682 StringRef Name = F->getName(); 1683 1684 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'"); 1685 Name = Name.substr(5); 1686 1687 bool IsX86 = Name.startswith("x86."); 1688 if (IsX86) 1689 Name = Name.substr(4); 1690 bool IsNVVM = Name.startswith("nvvm."); 1691 if (IsNVVM) 1692 Name = Name.substr(5); 1693 1694 if (IsX86 && Name.startswith("sse4a.movnt.")) { 1695 Module *M = F->getParent(); 1696 SmallVector<Metadata *, 1> Elts; 1697 Elts.push_back( 1698 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1699 MDNode *Node = MDNode::get(C, Elts); 1700 1701 Value *Arg0 = CI->getArgOperand(0); 1702 Value *Arg1 = CI->getArgOperand(1); 1703 1704 // Nontemporal (unaligned) store of the 0'th element of the float/double 1705 // vector. 1706 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType(); 1707 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy); 1708 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast"); 1709 Value *Extract = 1710 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement"); 1711 1712 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, Align(1)); 1713 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1714 1715 // Remove intrinsic. 1716 CI->eraseFromParent(); 1717 return; 1718 } 1719 1720 if (IsX86 && (Name.startswith("avx.movnt.") || 1721 Name.startswith("avx512.storent."))) { 1722 Module *M = F->getParent(); 1723 SmallVector<Metadata *, 1> Elts; 1724 Elts.push_back( 1725 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1726 MDNode *Node = MDNode::get(C, Elts); 1727 1728 Value *Arg0 = CI->getArgOperand(0); 1729 Value *Arg1 = CI->getArgOperand(1); 1730 1731 // Convert the type of the pointer to a pointer to the stored type. 1732 Value *BC = Builder.CreateBitCast(Arg0, 1733 PointerType::getUnqual(Arg1->getType()), 1734 "cast"); 1735 VectorType *VTy = cast<VectorType>(Arg1->getType()); 1736 StoreInst *SI = 1737 Builder.CreateAlignedStore(Arg1, BC, Align(VTy->getBitWidth() / 8)); 1738 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1739 1740 // Remove intrinsic. 1741 CI->eraseFromParent(); 1742 return; 1743 } 1744 1745 if (IsX86 && Name == "sse2.storel.dq") { 1746 Value *Arg0 = CI->getArgOperand(0); 1747 Value *Arg1 = CI->getArgOperand(1); 1748 1749 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 1750 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 1751 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0); 1752 Value *BC = Builder.CreateBitCast(Arg0, 1753 PointerType::getUnqual(Elt->getType()), 1754 "cast"); 1755 Builder.CreateAlignedStore(Elt, BC, Align(1)); 1756 1757 // Remove intrinsic. 1758 CI->eraseFromParent(); 1759 return; 1760 } 1761 1762 if (IsX86 && (Name.startswith("sse.storeu.") || 1763 Name.startswith("sse2.storeu.") || 1764 Name.startswith("avx.storeu."))) { 1765 Value *Arg0 = CI->getArgOperand(0); 1766 Value *Arg1 = CI->getArgOperand(1); 1767 1768 Arg0 = Builder.CreateBitCast(Arg0, 1769 PointerType::getUnqual(Arg1->getType()), 1770 "cast"); 1771 Builder.CreateAlignedStore(Arg1, Arg0, Align(1)); 1772 1773 // Remove intrinsic. 1774 CI->eraseFromParent(); 1775 return; 1776 } 1777 1778 if (IsX86 && Name == "avx512.mask.store.ss") { 1779 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1)); 1780 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1), 1781 Mask, false); 1782 1783 // Remove intrinsic. 
1784       CI->eraseFromParent();
1785       return;
1786     }
1787 
1788     if (IsX86 && (Name.startswith("avx512.mask.store"))) {
1789       // "avx512.mask.storeu." or "avx512.mask.store."
1790       bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
1791       UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
1792                          CI->getArgOperand(2), Aligned);
1793 
1794       // Remove intrinsic.
1795       CI->eraseFromParent();
1796       return;
1797     }
1798 
1799     Value *Rep;
1800     // Upgrade packed integer vector compare intrinsics to compare instructions.
1801     if (IsX86 && (Name.startswith("sse2.pcmp") ||
1802                   Name.startswith("avx2.pcmp"))) {
1803       // "sse2.pcmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
1804       bool CmpEq = Name[9] == 'e';
1805       Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
1806                                CI->getArgOperand(0), CI->getArgOperand(1));
1807       Rep = Builder.CreateSExt(Rep, CI->getType(), "");
1808     } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
1809       Type *ExtTy = Type::getInt32Ty(C);
1810       if (CI->getOperand(0)->getType()->isIntegerTy(8))
1811         ExtTy = Type::getInt64Ty(C);
1812       unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
1813                          ExtTy->getPrimitiveSizeInBits();
1814       Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
1815       Rep = Builder.CreateVectorSplat(NumElts, Rep);
1816     } else if (IsX86 && (Name == "sse.sqrt.ss" ||
1817                          Name == "sse2.sqrt.sd")) {
1818       Value *Vec = CI->getArgOperand(0);
1819       Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
1820       Function *Intr = Intrinsic::getDeclaration(F->getParent(),
1821                                                  Intrinsic::sqrt, Elt0->getType());
1822       Elt0 = Builder.CreateCall(Intr, Elt0);
1823       Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
1824     } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
1825                          Name.startswith("sse2.sqrt.p") ||
1826                          Name.startswith("sse.sqrt.p"))) {
1827       Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1828                                                          Intrinsic::sqrt,
1829                                                          CI->getType()),
1830                                {CI->getArgOperand(0)});
1831     } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
1832       if (CI->getNumArgOperands() == 4 &&
1833           (!isa<ConstantInt>(CI->getArgOperand(3)) ||
1834            cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
1835         Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
1836                                             : Intrinsic::x86_avx512_sqrt_pd_512;
1837 
1838         Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
1839         Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
1840                                                            IID), Args);
1841       } else {
1842         Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1843                                                            Intrinsic::sqrt,
1844                                                            CI->getType()),
1845                                  {CI->getArgOperand(0)});
1846       }
1847       Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1848                           CI->getArgOperand(1));
1849     } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
1850                          Name.startswith("avx512.ptestnm"))) {
1851       Value *Op0 = CI->getArgOperand(0);
1852       Value *Op1 = CI->getArgOperand(1);
1853       Value *Mask = CI->getArgOperand(2);
1854       Rep = Builder.CreateAnd(Op0, Op1);
1855       llvm::Type *Ty = Op0->getType();
1856       Value *Zero = llvm::Constant::getNullValue(Ty);
1857       ICmpInst::Predicate Pred =
1858         Name.startswith("avx512.ptestm") ?
ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ; 1859 Rep = Builder.CreateICmp(Pred, Rep, Zero); 1860 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask); 1861 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){ 1862 unsigned NumElts = 1863 CI->getArgOperand(1)->getType()->getVectorNumElements(); 1864 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0)); 1865 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1866 CI->getArgOperand(1)); 1867 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) { 1868 unsigned NumElts = CI->getType()->getScalarSizeInBits(); 1869 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts); 1870 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts); 1871 uint32_t Indices[64]; 1872 for (unsigned i = 0; i != NumElts; ++i) 1873 Indices[i] = i; 1874 1875 // First extract half of each vector. This gives better codegen than 1876 // doing it in a single shuffle. 1877 LHS = Builder.CreateShuffleVector(LHS, LHS, 1878 makeArrayRef(Indices, NumElts / 2)); 1879 RHS = Builder.CreateShuffleVector(RHS, RHS, 1880 makeArrayRef(Indices, NumElts / 2)); 1881 // Concat the vectors. 1882 // NOTE: Operands have to be swapped to match intrinsic definition. 1883 Rep = Builder.CreateShuffleVector(RHS, LHS, 1884 makeArrayRef(Indices, NumElts)); 1885 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1886 } else if (IsX86 && Name == "avx512.kand.w") { 1887 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1888 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1889 Rep = Builder.CreateAnd(LHS, RHS); 1890 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1891 } else if (IsX86 && Name == "avx512.kandn.w") { 1892 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1893 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1894 LHS = Builder.CreateNot(LHS); 1895 Rep = Builder.CreateAnd(LHS, RHS); 1896 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1897 } else if (IsX86 && Name == "avx512.kor.w") { 1898 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1899 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1900 Rep = Builder.CreateOr(LHS, RHS); 1901 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1902 } else if (IsX86 && Name == "avx512.kxor.w") { 1903 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1904 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1905 Rep = Builder.CreateXor(LHS, RHS); 1906 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1907 } else if (IsX86 && Name == "avx512.kxnor.w") { 1908 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1909 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1910 LHS = Builder.CreateNot(LHS); 1911 Rep = Builder.CreateXor(LHS, RHS); 1912 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1913 } else if (IsX86 && Name == "avx512.knot.w") { 1914 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1915 Rep = Builder.CreateNot(Rep); 1916 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1917 } else if (IsX86 && 1918 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) { 1919 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1920 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1921 Rep = Builder.CreateOr(LHS, RHS); 1922 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty()); 1923 Value *C; 1924 if (Name[14] == 'c') 1925 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty()); 1926 else 1927 C = ConstantInt::getNullValue(Builder.getInt16Ty()); 1928 Rep = 
Builder.CreateICmpEQ(Rep, C); 1929 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty()); 1930 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" || 1931 Name == "sse.sub.ss" || Name == "sse2.sub.sd" || 1932 Name == "sse.mul.ss" || Name == "sse2.mul.sd" || 1933 Name == "sse.div.ss" || Name == "sse2.div.sd")) { 1934 Type *I32Ty = Type::getInt32Ty(C); 1935 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1936 ConstantInt::get(I32Ty, 0)); 1937 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1938 ConstantInt::get(I32Ty, 0)); 1939 Value *EltOp; 1940 if (Name.contains(".add.")) 1941 EltOp = Builder.CreateFAdd(Elt0, Elt1); 1942 else if (Name.contains(".sub.")) 1943 EltOp = Builder.CreateFSub(Elt0, Elt1); 1944 else if (Name.contains(".mul.")) 1945 EltOp = Builder.CreateFMul(Elt0, Elt1); 1946 else 1947 EltOp = Builder.CreateFDiv(Elt0, Elt1); 1948 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp, 1949 ConstantInt::get(I32Ty, 0)); 1950 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) { 1951 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt." 1952 bool CmpEq = Name[16] == 'e'; 1953 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true); 1954 } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) { 1955 Type *OpTy = CI->getArgOperand(0)->getType(); 1956 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1957 Intrinsic::ID IID; 1958 switch (VecWidth) { 1959 default: llvm_unreachable("Unexpected intrinsic"); 1960 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break; 1961 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break; 1962 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break; 1963 } 1964 1965 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1966 { CI->getOperand(0), CI->getArgOperand(1) }); 1967 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1968 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) { 1969 Type *OpTy = CI->getArgOperand(0)->getType(); 1970 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1971 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1972 Intrinsic::ID IID; 1973 if (VecWidth == 128 && EltWidth == 32) 1974 IID = Intrinsic::x86_avx512_fpclass_ps_128; 1975 else if (VecWidth == 256 && EltWidth == 32) 1976 IID = Intrinsic::x86_avx512_fpclass_ps_256; 1977 else if (VecWidth == 512 && EltWidth == 32) 1978 IID = Intrinsic::x86_avx512_fpclass_ps_512; 1979 else if (VecWidth == 128 && EltWidth == 64) 1980 IID = Intrinsic::x86_avx512_fpclass_pd_128; 1981 else if (VecWidth == 256 && EltWidth == 64) 1982 IID = Intrinsic::x86_avx512_fpclass_pd_256; 1983 else if (VecWidth == 512 && EltWidth == 64) 1984 IID = Intrinsic::x86_avx512_fpclass_pd_512; 1985 else 1986 llvm_unreachable("Unexpected intrinsic"); 1987 1988 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1989 { CI->getOperand(0), CI->getArgOperand(1) }); 1990 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1991 } else if (IsX86 && Name.startswith("avx512.mask.cmp.p")) { 1992 Type *OpTy = CI->getArgOperand(0)->getType(); 1993 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1994 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1995 Intrinsic::ID IID; 1996 if (VecWidth == 128 && EltWidth == 32) 1997 IID = Intrinsic::x86_avx512_cmp_ps_128; 1998 else if (VecWidth == 256 && EltWidth == 32) 1999 IID = Intrinsic::x86_avx512_cmp_ps_256; 2000 else if (VecWidth == 512 && EltWidth == 32) 2001 IID = Intrinsic::x86_avx512_cmp_ps_512; 2002 
else if (VecWidth == 128 && EltWidth == 64) 2003 IID = Intrinsic::x86_avx512_cmp_pd_128; 2004 else if (VecWidth == 256 && EltWidth == 64) 2005 IID = Intrinsic::x86_avx512_cmp_pd_256; 2006 else if (VecWidth == 512 && EltWidth == 64) 2007 IID = Intrinsic::x86_avx512_cmp_pd_512; 2008 else 2009 llvm_unreachable("Unexpected intrinsic"); 2010 2011 SmallVector<Value *, 4> Args; 2012 Args.push_back(CI->getArgOperand(0)); 2013 Args.push_back(CI->getArgOperand(1)); 2014 Args.push_back(CI->getArgOperand(2)); 2015 if (CI->getNumArgOperands() == 5) 2016 Args.push_back(CI->getArgOperand(4)); 2017 2018 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2019 Args); 2020 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(3)); 2021 } else if (IsX86 && Name.startswith("avx512.mask.cmp.") && 2022 Name[16] != 'p') { 2023 // Integer compare intrinsics. 2024 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2025 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true); 2026 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) { 2027 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2028 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false); 2029 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") || 2030 Name.startswith("avx512.cvtw2mask.") || 2031 Name.startswith("avx512.cvtd2mask.") || 2032 Name.startswith("avx512.cvtq2mask."))) { 2033 Value *Op = CI->getArgOperand(0); 2034 Value *Zero = llvm::Constant::getNullValue(Op->getType()); 2035 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero); 2036 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr); 2037 } else if(IsX86 && (Name == "ssse3.pabs.b.128" || 2038 Name == "ssse3.pabs.w.128" || 2039 Name == "ssse3.pabs.d.128" || 2040 Name.startswith("avx2.pabs") || 2041 Name.startswith("avx512.mask.pabs"))) { 2042 Rep = upgradeAbs(Builder, *CI); 2043 } else if (IsX86 && (Name == "sse41.pmaxsb" || 2044 Name == "sse2.pmaxs.w" || 2045 Name == "sse41.pmaxsd" || 2046 Name.startswith("avx2.pmaxs") || 2047 Name.startswith("avx512.mask.pmaxs"))) { 2048 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT); 2049 } else if (IsX86 && (Name == "sse2.pmaxu.b" || 2050 Name == "sse41.pmaxuw" || 2051 Name == "sse41.pmaxud" || 2052 Name.startswith("avx2.pmaxu") || 2053 Name.startswith("avx512.mask.pmaxu"))) { 2054 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT); 2055 } else if (IsX86 && (Name == "sse41.pminsb" || 2056 Name == "sse2.pmins.w" || 2057 Name == "sse41.pminsd" || 2058 Name.startswith("avx2.pmins") || 2059 Name.startswith("avx512.mask.pmins"))) { 2060 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT); 2061 } else if (IsX86 && (Name == "sse2.pminu.b" || 2062 Name == "sse41.pminuw" || 2063 Name == "sse41.pminud" || 2064 Name.startswith("avx2.pminu") || 2065 Name.startswith("avx512.mask.pminu"))) { 2066 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT); 2067 } else if (IsX86 && (Name == "sse2.pmulu.dq" || 2068 Name == "avx2.pmulu.dq" || 2069 Name == "avx512.pmulu.dq.512" || 2070 Name.startswith("avx512.mask.pmulu.dq."))) { 2071 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false); 2072 } else if (IsX86 && (Name == "sse41.pmuldq" || 2073 Name == "avx2.pmul.dq" || 2074 Name == "avx512.pmul.dq.512" || 2075 Name.startswith("avx512.mask.pmul.dq."))) { 2076 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true); 2077 } else if (IsX86 && (Name == "sse.cvtsi2ss" || 2078 Name == "sse2.cvtsi2sd" || 2079 Name == "sse.cvtsi642ss" || 2080 Name == "sse2.cvtsi642sd")) { 2081 Rep = 
Builder.CreateSIToFP(CI->getArgOperand(1), 2082 CI->getType()->getVectorElementType()); 2083 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 2084 } else if (IsX86 && Name == "avx512.cvtusi2sd") { 2085 Rep = Builder.CreateUIToFP(CI->getArgOperand(1), 2086 CI->getType()->getVectorElementType()); 2087 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 2088 } else if (IsX86 && Name == "sse2.cvtss2sd") { 2089 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0); 2090 Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType()); 2091 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 2092 } else if (IsX86 && (Name == "sse2.cvtdq2pd" || 2093 Name == "sse2.cvtdq2ps" || 2094 Name == "avx.cvtdq2.pd.256" || 2095 Name == "avx.cvtdq2.ps.256" || 2096 Name.startswith("avx512.mask.cvtdq2pd.") || 2097 Name.startswith("avx512.mask.cvtudq2pd.") || 2098 Name.startswith("avx512.mask.cvtdq2ps.") || 2099 Name.startswith("avx512.mask.cvtudq2ps.") || 2100 Name.startswith("avx512.mask.cvtqq2pd.") || 2101 Name.startswith("avx512.mask.cvtuqq2pd.") || 2102 Name == "avx512.mask.cvtqq2ps.256" || 2103 Name == "avx512.mask.cvtqq2ps.512" || 2104 Name == "avx512.mask.cvtuqq2ps.256" || 2105 Name == "avx512.mask.cvtuqq2ps.512" || 2106 Name == "sse2.cvtps2pd" || 2107 Name == "avx.cvt.ps2.pd.256" || 2108 Name == "avx512.mask.cvtps2pd.128" || 2109 Name == "avx512.mask.cvtps2pd.256")) { 2110 Type *DstTy = CI->getType(); 2111 Rep = CI->getArgOperand(0); 2112 Type *SrcTy = Rep->getType(); 2113 2114 unsigned NumDstElts = DstTy->getVectorNumElements(); 2115 if (NumDstElts < SrcTy->getVectorNumElements()) { 2116 assert(NumDstElts == 2 && "Unexpected vector size"); 2117 uint32_t ShuffleMask[2] = { 0, 1 }; 2118 Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); 2119 } 2120 2121 bool IsPS2PD = SrcTy->getVectorElementType()->isFloatTy(); 2122 bool IsUnsigned = (StringRef::npos != Name.find("cvtu")); 2123 if (IsPS2PD) 2124 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd"); 2125 else if (CI->getNumArgOperands() == 4 && 2126 (!isa<ConstantInt>(CI->getArgOperand(3)) || 2127 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) { 2128 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round 2129 : Intrinsic::x86_avx512_sitofp_round; 2130 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID, 2131 { DstTy, SrcTy }); 2132 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) }); 2133 } else { 2134 Rep = IsUnsigned ? 
Builder.CreateUIToFP(Rep, DstTy, "cvt") 2135 : Builder.CreateSIToFP(Rep, DstTy, "cvt"); 2136 } 2137 2138 if (CI->getNumArgOperands() >= 3) 2139 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2140 CI->getArgOperand(1)); 2141 } else if (IsX86 && (Name.startswith("avx512.mask.vcvtph2ps.") || 2142 Name.startswith("vcvtph2ps."))) { 2143 Type *DstTy = CI->getType(); 2144 Rep = CI->getArgOperand(0); 2145 Type *SrcTy = Rep->getType(); 2146 unsigned NumDstElts = DstTy->getVectorNumElements(); 2147 if (NumDstElts != SrcTy->getVectorNumElements()) { 2148 assert(NumDstElts == 4 && "Unexpected vector size"); 2149 uint32_t ShuffleMask[4] = {0, 1, 2, 3}; 2150 Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); 2151 } 2152 Rep = Builder.CreateBitCast( 2153 Rep, VectorType::get(Type::getHalfTy(C), NumDstElts)); 2154 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtph2ps"); 2155 if (CI->getNumArgOperands() >= 3) 2156 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2157 CI->getArgOperand(1)); 2158 } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) { 2159 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 2160 CI->getArgOperand(1), CI->getArgOperand(2), 2161 /*Aligned*/false); 2162 } else if (IsX86 && (Name.startswith("avx512.mask.load."))) { 2163 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 2164 CI->getArgOperand(1),CI->getArgOperand(2), 2165 /*Aligned*/true); 2166 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) { 2167 Type *ResultTy = CI->getType(); 2168 Type *PtrTy = ResultTy->getVectorElementType(); 2169 2170 // Cast the pointer to element type. 2171 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2172 llvm::PointerType::getUnqual(PtrTy)); 2173 2174 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2175 ResultTy->getVectorNumElements()); 2176 2177 Function *ELd = Intrinsic::getDeclaration(F->getParent(), 2178 Intrinsic::masked_expandload, 2179 ResultTy); 2180 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) }); 2181 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) { 2182 Type *ResultTy = CI->getArgOperand(1)->getType(); 2183 Type *PtrTy = ResultTy->getVectorElementType(); 2184 2185 // Cast the pointer to element type. 2186 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2187 llvm::PointerType::getUnqual(PtrTy)); 2188 2189 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2190 ResultTy->getVectorNumElements()); 2191 2192 Function *CSt = Intrinsic::getDeclaration(F->getParent(), 2193 Intrinsic::masked_compressstore, 2194 ResultTy); 2195 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec }); 2196 } else if (IsX86 && (Name.startswith("avx512.mask.compress.") || 2197 Name.startswith("avx512.mask.expand."))) { 2198 Type *ResultTy = CI->getType(); 2199 2200 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2201 ResultTy->getVectorNumElements()); 2202 2203 bool IsCompress = Name[12] == 'c'; 2204 Intrinsic::ID IID = IsCompress ? 
Intrinsic::x86_avx512_mask_compress 2205 : Intrinsic::x86_avx512_mask_expand; 2206 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy); 2207 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1), 2208 MaskVec }); 2209 } else if (IsX86 && Name.startswith("xop.vpcom")) { 2210 bool IsSigned; 2211 if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") || 2212 Name.endswith("uq")) 2213 IsSigned = false; 2214 else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") || 2215 Name.endswith("q")) 2216 IsSigned = true; 2217 else 2218 llvm_unreachable("Unknown suffix"); 2219 2220 unsigned Imm; 2221 if (CI->getNumArgOperands() == 3) { 2222 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2223 } else { 2224 Name = Name.substr(9); // strip off "xop.vpcom" 2225 if (Name.startswith("lt")) 2226 Imm = 0; 2227 else if (Name.startswith("le")) 2228 Imm = 1; 2229 else if (Name.startswith("gt")) 2230 Imm = 2; 2231 else if (Name.startswith("ge")) 2232 Imm = 3; 2233 else if (Name.startswith("eq")) 2234 Imm = 4; 2235 else if (Name.startswith("ne")) 2236 Imm = 5; 2237 else if (Name.startswith("false")) 2238 Imm = 6; 2239 else if (Name.startswith("true")) 2240 Imm = 7; 2241 else 2242 llvm_unreachable("Unknown condition"); 2243 } 2244 2245 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned); 2246 } else if (IsX86 && Name.startswith("xop.vpcmov")) { 2247 Value *Sel = CI->getArgOperand(2); 2248 Value *NotSel = Builder.CreateNot(Sel); 2249 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel); 2250 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel); 2251 Rep = Builder.CreateOr(Sel0, Sel1); 2252 } else if (IsX86 && (Name.startswith("xop.vprot") || 2253 Name.startswith("avx512.prol") || 2254 Name.startswith("avx512.mask.prol"))) { 2255 Rep = upgradeX86Rotate(Builder, *CI, false); 2256 } else if (IsX86 && (Name.startswith("avx512.pror") || 2257 Name.startswith("avx512.mask.pror"))) { 2258 Rep = upgradeX86Rotate(Builder, *CI, true); 2259 } else if (IsX86 && (Name.startswith("avx512.vpshld.") || 2260 Name.startswith("avx512.mask.vpshld") || 2261 Name.startswith("avx512.maskz.vpshld"))) { 2262 bool ZeroMask = Name[11] == 'z'; 2263 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask); 2264 } else if (IsX86 && (Name.startswith("avx512.vpshrd.") || 2265 Name.startswith("avx512.mask.vpshrd") || 2266 Name.startswith("avx512.maskz.vpshrd"))) { 2267 bool ZeroMask = Name[11] == 'z'; 2268 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask); 2269 } else if (IsX86 && Name == "sse42.crc32.64.8") { 2270 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(), 2271 Intrinsic::x86_sse42_crc32_32_8); 2272 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C)); 2273 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)}); 2274 Rep = Builder.CreateZExt(Rep, CI->getType(), ""); 2275 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") || 2276 Name.startswith("avx512.vbroadcast.s"))) { 2277 // Replace broadcasts with a series of insertelements. 
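    // The operand is a pointer to the scalar, so this becomes a scalar load
    // followed by one insertelement per destination lane, roughly (for an
    // 8 x float broadcast; illustrative only):
    //   %s  = load float, float* %p
    //   %v0 = insertelement <8 x float> undef, float %s, i32 0
    //   ...
    //   %v7 = insertelement <8 x float> %v6, float %s, i32 7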
2278 Type *VecTy = CI->getType(); 2279 Type *EltTy = VecTy->getVectorElementType(); 2280 unsigned EltNum = VecTy->getVectorNumElements(); 2281 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0), 2282 EltTy->getPointerTo()); 2283 Value *Load = Builder.CreateLoad(EltTy, Cast); 2284 Type *I32Ty = Type::getInt32Ty(C); 2285 Rep = UndefValue::get(VecTy); 2286 for (unsigned I = 0; I < EltNum; ++I) 2287 Rep = Builder.CreateInsertElement(Rep, Load, 2288 ConstantInt::get(I32Ty, I)); 2289 } else if (IsX86 && (Name.startswith("sse41.pmovsx") || 2290 Name.startswith("sse41.pmovzx") || 2291 Name.startswith("avx2.pmovsx") || 2292 Name.startswith("avx2.pmovzx") || 2293 Name.startswith("avx512.mask.pmovsx") || 2294 Name.startswith("avx512.mask.pmovzx"))) { 2295 VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType()); 2296 VectorType *DstTy = cast<VectorType>(CI->getType()); 2297 unsigned NumDstElts = DstTy->getNumElements(); 2298 2299 // Extract a subvector of the first NumDstElts lanes and sign/zero extend. 2300 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 2301 for (unsigned i = 0; i != NumDstElts; ++i) 2302 ShuffleMask[i] = i; 2303 2304 Value *SV = Builder.CreateShuffleVector( 2305 CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask); 2306 2307 bool DoSext = (StringRef::npos != Name.find("pmovsx")); 2308 Rep = DoSext ? Builder.CreateSExt(SV, DstTy) 2309 : Builder.CreateZExt(SV, DstTy); 2310 // If there are 3 arguments, it's a masked intrinsic so we need a select. 2311 if (CI->getNumArgOperands() == 3) 2312 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2313 CI->getArgOperand(1)); 2314 } else if (Name == "avx512.mask.pmov.qd.256" || 2315 Name == "avx512.mask.pmov.qd.512" || 2316 Name == "avx512.mask.pmov.wb.256" || 2317 Name == "avx512.mask.pmov.wb.512") { 2318 Type *Ty = CI->getArgOperand(1)->getType(); 2319 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty); 2320 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2321 CI->getArgOperand(1)); 2322 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") || 2323 Name == "avx2.vbroadcasti128")) { 2324 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle. 2325 Type *EltTy = CI->getType()->getVectorElementType(); 2326 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits(); 2327 Type *VT = VectorType::get(EltTy, NumSrcElts); 2328 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0), 2329 PointerType::getUnqual(VT)); 2330 Value *Load = Builder.CreateAlignedLoad(VT, Op, Align(1)); 2331 if (NumSrcElts == 2) 2332 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2333 { 0, 1, 0, 1 }); 2334 else 2335 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2336 { 0, 1, 2, 3, 0, 1, 2, 3 }); 2337 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") || 2338 Name.startswith("avx512.mask.shuf.f"))) { 2339 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2340 Type *VT = CI->getType(); 2341 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128; 2342 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits(); 2343 unsigned ControlBitsMask = NumLanes - 1; 2344 unsigned NumControlBits = NumLanes / 2; 2345 SmallVector<uint32_t, 8> ShuffleMask(0); 2346 2347 for (unsigned l = 0; l != NumLanes; ++l) { 2348 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask; 2349 // We actually need the other source. 
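      // Destination lanes in the upper half select from the second operand of
      // the shuffle, so bias the lane number by NumLanes to index into the
      // concatenated (Op0, Op1) element space.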
2350 if (l >= NumLanes / 2) 2351 LaneMask += NumLanes; 2352 for (unsigned i = 0; i != NumElementsInLane; ++i) 2353 ShuffleMask.push_back(LaneMask * NumElementsInLane + i); 2354 } 2355 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 2356 CI->getArgOperand(1), ShuffleMask); 2357 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2358 CI->getArgOperand(3)); 2359 }else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") || 2360 Name.startswith("avx512.mask.broadcasti"))) { 2361 unsigned NumSrcElts = 2362 CI->getArgOperand(0)->getType()->getVectorNumElements(); 2363 unsigned NumDstElts = CI->getType()->getVectorNumElements(); 2364 2365 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 2366 for (unsigned i = 0; i != NumDstElts; ++i) 2367 ShuffleMask[i] = i % NumSrcElts; 2368 2369 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0), 2370 CI->getArgOperand(0), 2371 ShuffleMask); 2372 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2373 CI->getArgOperand(1)); 2374 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") || 2375 Name.startswith("avx2.vbroadcast") || 2376 Name.startswith("avx512.pbroadcast") || 2377 Name.startswith("avx512.mask.broadcast.s"))) { 2378 // Replace vp?broadcasts with a vector shuffle. 2379 Value *Op = CI->getArgOperand(0); 2380 unsigned NumElts = CI->getType()->getVectorNumElements(); 2381 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts); 2382 Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()), 2383 Constant::getNullValue(MaskTy)); 2384 2385 if (CI->getNumArgOperands() == 3) 2386 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2387 CI->getArgOperand(1)); 2388 } else if (IsX86 && (Name.startswith("sse2.padds.") || 2389 Name.startswith("sse2.psubs.") || 2390 Name.startswith("avx2.padds.") || 2391 Name.startswith("avx2.psubs.") || 2392 Name.startswith("avx512.padds.") || 2393 Name.startswith("avx512.psubs.") || 2394 Name.startswith("avx512.mask.padds.") || 2395 Name.startswith("avx512.mask.psubs."))) { 2396 bool IsAdd = Name.contains(".padds"); 2397 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, true, IsAdd); 2398 } else if (IsX86 && (Name.startswith("sse2.paddus.") || 2399 Name.startswith("sse2.psubus.") || 2400 Name.startswith("avx2.paddus.") || 2401 Name.startswith("avx2.psubus.") || 2402 Name.startswith("avx512.mask.paddus.") || 2403 Name.startswith("avx512.mask.psubus."))) { 2404 bool IsAdd = Name.contains(".paddus"); 2405 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, false, IsAdd); 2406 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) { 2407 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2408 CI->getArgOperand(1), 2409 CI->getArgOperand(2), 2410 CI->getArgOperand(3), 2411 CI->getArgOperand(4), 2412 false); 2413 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) { 2414 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2415 CI->getArgOperand(1), 2416 CI->getArgOperand(2), 2417 CI->getArgOperand(3), 2418 CI->getArgOperand(4), 2419 true); 2420 } else if (IsX86 && (Name == "sse2.psll.dq" || 2421 Name == "avx2.psll.dq")) { 2422 // 128/256-bit shift left specified in bits. 2423 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2424 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), 2425 Shift / 8); // Shift is in bits. 2426 } else if (IsX86 && (Name == "sse2.psrl.dq" || 2427 Name == "avx2.psrl.dq")) { 2428 // 128/256-bit shift right specified in bits. 
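    // The legacy intrinsic's immediate counts bits; dividing by 8 below
    // converts it to the byte shift UpgradeX86PSRLDQIntrinsics expects (the
    // same helper also serves the .dq.bs byte-count forms handled next).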
2429     unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2430     Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
2431                                      Shift / 8); // Shift is in bits.
2432   } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
2433                        Name == "avx2.psll.dq.bs" ||
2434                        Name == "avx512.psll.dq.512")) {
2435     // 128/256/512-bit shift left specified in bytes.
2436     unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2437     Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2438   } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
2439                        Name == "avx2.psrl.dq.bs" ||
2440                        Name == "avx512.psrl.dq.512")) {
2441     // 128/256/512-bit shift right specified in bytes.
2442     unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2443     Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2444   } else if (IsX86 && (Name == "sse41.pblendw" ||
2445                        Name.startswith("sse41.blendp") ||
2446                        Name.startswith("avx.blend.p") ||
2447                        Name == "avx2.pblendw" ||
2448                        Name.startswith("avx2.pblendd."))) {
2449     Value *Op0 = CI->getArgOperand(0);
2450     Value *Op1 = CI->getArgOperand(1);
2451     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2452     VectorType *VecTy = cast<VectorType>(CI->getType());
2453     unsigned NumElts = VecTy->getNumElements();
2454 
2455     SmallVector<uint32_t, 16> Idxs(NumElts);
2456     for (unsigned i = 0; i != NumElts; ++i)
2457       Idxs[i] = ((Imm >> (i % 8)) & 1) ? i + NumElts : i;
2458 
2459     Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2460   } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
2461                        Name == "avx2.vinserti128" ||
2462                        Name.startswith("avx512.mask.insert"))) {
2463     Value *Op0 = CI->getArgOperand(0);
2464     Value *Op1 = CI->getArgOperand(1);
2465     unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2466     unsigned DstNumElts = CI->getType()->getVectorNumElements();
2467     unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
2468     unsigned Scale = DstNumElts / SrcNumElts;
2469 
2470     // Mask off the high bits of the immediate value; hardware ignores those.
2471     Imm = Imm % Scale;
2472 
2473     // Extend the second operand into a vector the size of the destination.
2474     Value *UndefV = UndefValue::get(Op1->getType());
2475     SmallVector<uint32_t, 8> Idxs(DstNumElts);
2476     for (unsigned i = 0; i != SrcNumElts; ++i)
2477       Idxs[i] = i;
2478     for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2479       Idxs[i] = SrcNumElts;
2480     Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);
2481 
2482     // Insert the second operand into the first operand.
2483 
2484     // Note that there is no guarantee that instruction lowering will actually
2485     // produce a vinsertf128 instruction for the created shuffles. In
2486     // particular, the 0 immediate case involves no lane changes, so it can
2487     // be handled as a blend.
2488 
2489     // Example of shuffle mask for 32-bit elements:
2490     // Imm = 1  <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2491     // Imm = 0  <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7>
2492 
2493     // First fill with identity mask.
2494     for (unsigned i = 0; i != DstNumElts; ++i)
2495       Idxs[i] = i;
2496     // Then replace the elements where we need to insert.
2497     for (unsigned i = 0; i != SrcNumElts; ++i)
2498       Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2499     Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2500 
2501     // If the intrinsic has a mask operand, handle that.
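    // The masked avx512.mask.insert* forms carry two extra operands (passthru
    // and mask), so an argument count of 5 distinguishes them from the
    // unmasked avx/avx2 forms, which take only the vector, subvector and
    // immediate.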
2502 if (CI->getNumArgOperands() == 5) 2503 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2504 CI->getArgOperand(3)); 2505 } else if (IsX86 && (Name.startswith("avx.vextractf128.") || 2506 Name == "avx2.vextracti128" || 2507 Name.startswith("avx512.mask.vextract"))) { 2508 Value *Op0 = CI->getArgOperand(0); 2509 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2510 unsigned DstNumElts = CI->getType()->getVectorNumElements(); 2511 unsigned SrcNumElts = Op0->getType()->getVectorNumElements(); 2512 unsigned Scale = SrcNumElts / DstNumElts; 2513 2514 // Mask off the high bits of the immediate value; hardware ignores those. 2515 Imm = Imm % Scale; 2516 2517 // Get indexes for the subvector of the input vector. 2518 SmallVector<uint32_t, 8> Idxs(DstNumElts); 2519 for (unsigned i = 0; i != DstNumElts; ++i) { 2520 Idxs[i] = i + (Imm * DstNumElts); 2521 } 2522 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2523 2524 // If the intrinsic has a mask operand, handle that. 2525 if (CI->getNumArgOperands() == 4) 2526 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2527 CI->getArgOperand(2)); 2528 } else if (!IsX86 && Name == "stackprotectorcheck") { 2529 Rep = nullptr; 2530 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") || 2531 Name.startswith("avx512.mask.perm.di."))) { 2532 Value *Op0 = CI->getArgOperand(0); 2533 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2534 VectorType *VecTy = cast<VectorType>(CI->getType()); 2535 unsigned NumElts = VecTy->getNumElements(); 2536 2537 SmallVector<uint32_t, 8> Idxs(NumElts); 2538 for (unsigned i = 0; i != NumElts; ++i) 2539 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3); 2540 2541 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2542 2543 if (CI->getNumArgOperands() == 4) 2544 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2545 CI->getArgOperand(2)); 2546 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") || 2547 Name == "avx2.vperm2i128")) { 2548 // The immediate permute control byte looks like this: 2549 // [1:0] - select 128 bits from sources for low half of destination 2550 // [2] - ignore 2551 // [3] - zero low half of destination 2552 // [5:4] - select 128 bits from sources for high half of destination 2553 // [6] - ignore 2554 // [7] - zero high half of destination 2555 2556 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2557 2558 unsigned NumElts = CI->getType()->getVectorNumElements(); 2559 unsigned HalfSize = NumElts / 2; 2560 SmallVector<uint32_t, 8> ShuffleMask(NumElts); 2561 2562 // Determine which operand(s) are actually in use for this instruction. 2563 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2564 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2565 2566 // If needed, replace operands based on zero mask. 2567 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0; 2568 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1; 2569 2570 // Permute low half of result. 2571 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0; 2572 for (unsigned i = 0; i < HalfSize; ++i) 2573 ShuffleMask[i] = StartIndex + i; 2574 2575 // Permute high half of result. 2576 StartIndex = (Imm & 0x10) ? 
HalfSize : 0; 2577 for (unsigned i = 0; i < HalfSize; ++i) 2578 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i; 2579 2580 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2581 2582 } else if (IsX86 && (Name.startswith("avx.vpermil.") || 2583 Name == "sse2.pshuf.d" || 2584 Name.startswith("avx512.mask.vpermil.p") || 2585 Name.startswith("avx512.mask.pshuf.d."))) { 2586 Value *Op0 = CI->getArgOperand(0); 2587 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2588 VectorType *VecTy = cast<VectorType>(CI->getType()); 2589 unsigned NumElts = VecTy->getNumElements(); 2590 // Calculate the size of each index in the immediate. 2591 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits(); 2592 unsigned IdxMask = ((1 << IdxSize) - 1); 2593 2594 SmallVector<uint32_t, 8> Idxs(NumElts); 2595 // Lookup the bits for this element, wrapping around the immediate every 2596 // 8-bits. Elements are grouped into sets of 2 or 4 elements so we need 2597 // to offset by the first index of each group. 2598 for (unsigned i = 0; i != NumElts; ++i) 2599 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask); 2600 2601 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2602 2603 if (CI->getNumArgOperands() == 4) 2604 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2605 CI->getArgOperand(2)); 2606 } else if (IsX86 && (Name == "sse2.pshufl.w" || 2607 Name.startswith("avx512.mask.pshufl.w."))) { 2608 Value *Op0 = CI->getArgOperand(0); 2609 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2610 unsigned NumElts = CI->getType()->getVectorNumElements(); 2611 2612 SmallVector<uint32_t, 16> Idxs(NumElts); 2613 for (unsigned l = 0; l != NumElts; l += 8) { 2614 for (unsigned i = 0; i != 4; ++i) 2615 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l; 2616 for (unsigned i = 4; i != 8; ++i) 2617 Idxs[i + l] = i + l; 2618 } 2619 2620 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2621 2622 if (CI->getNumArgOperands() == 4) 2623 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2624 CI->getArgOperand(2)); 2625 } else if (IsX86 && (Name == "sse2.pshufh.w" || 2626 Name.startswith("avx512.mask.pshufh.w."))) { 2627 Value *Op0 = CI->getArgOperand(0); 2628 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2629 unsigned NumElts = CI->getType()->getVectorNumElements(); 2630 2631 SmallVector<uint32_t, 16> Idxs(NumElts); 2632 for (unsigned l = 0; l != NumElts; l += 8) { 2633 for (unsigned i = 0; i != 4; ++i) 2634 Idxs[i + l] = i + l; 2635 for (unsigned i = 0; i != 4; ++i) 2636 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l; 2637 } 2638 2639 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2640 2641 if (CI->getNumArgOperands() == 4) 2642 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2643 CI->getArgOperand(2)); 2644 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) { 2645 Value *Op0 = CI->getArgOperand(0); 2646 Value *Op1 = CI->getArgOperand(1); 2647 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2648 unsigned NumElts = CI->getType()->getVectorNumElements(); 2649 2650 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2651 unsigned HalfLaneElts = NumLaneElts / 2; 2652 2653 SmallVector<uint32_t, 16> Idxs(NumElts); 2654 for (unsigned i = 0; i != NumElts; ++i) { 2655 // Base index is the starting element of the lane. 2656 Idxs[i] = i - (i % NumLaneElts); 2657 // If we are half way through the lane switch to the other source. 
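      // (The second half of each 128-bit lane is taken from the second source
      // operand, so those indices are biased past NumElts into the second
      // shuffle input.)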
2658 if ((i % NumLaneElts) >= HalfLaneElts) 2659 Idxs[i] += NumElts; 2660 // Now select the specific element. By adding HalfLaneElts bits from 2661 // the immediate. Wrapping around the immediate every 8-bits. 2662 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1); 2663 } 2664 2665 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2666 2667 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2668 CI->getArgOperand(3)); 2669 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") || 2670 Name.startswith("avx512.mask.movshdup") || 2671 Name.startswith("avx512.mask.movsldup"))) { 2672 Value *Op0 = CI->getArgOperand(0); 2673 unsigned NumElts = CI->getType()->getVectorNumElements(); 2674 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2675 2676 unsigned Offset = 0; 2677 if (Name.startswith("avx512.mask.movshdup.")) 2678 Offset = 1; 2679 2680 SmallVector<uint32_t, 16> Idxs(NumElts); 2681 for (unsigned l = 0; l != NumElts; l += NumLaneElts) 2682 for (unsigned i = 0; i != NumLaneElts; i += 2) { 2683 Idxs[i + l + 0] = i + l + Offset; 2684 Idxs[i + l + 1] = i + l + Offset; 2685 } 2686 2687 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2688 2689 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2690 CI->getArgOperand(1)); 2691 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") || 2692 Name.startswith("avx512.mask.unpckl."))) { 2693 Value *Op0 = CI->getArgOperand(0); 2694 Value *Op1 = CI->getArgOperand(1); 2695 int NumElts = CI->getType()->getVectorNumElements(); 2696 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2697 2698 SmallVector<uint32_t, 64> Idxs(NumElts); 2699 for (int l = 0; l != NumElts; l += NumLaneElts) 2700 for (int i = 0; i != NumLaneElts; ++i) 2701 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2); 2702 2703 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2704 2705 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2706 CI->getArgOperand(2)); 2707 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") || 2708 Name.startswith("avx512.mask.unpckh."))) { 2709 Value *Op0 = CI->getArgOperand(0); 2710 Value *Op1 = CI->getArgOperand(1); 2711 int NumElts = CI->getType()->getVectorNumElements(); 2712 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2713 2714 SmallVector<uint32_t, 64> Idxs(NumElts); 2715 for (int l = 0; l != NumElts; l += NumLaneElts) 2716 for (int i = 0; i != NumLaneElts; ++i) 2717 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2); 2718 2719 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2720 2721 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2722 CI->getArgOperand(2)); 2723 } else if (IsX86 && (Name.startswith("avx512.mask.and.") || 2724 Name.startswith("avx512.mask.pand."))) { 2725 VectorType *FTy = cast<VectorType>(CI->getType()); 2726 VectorType *ITy = VectorType::getInteger(FTy); 2727 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2728 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2729 Rep = Builder.CreateBitCast(Rep, FTy); 2730 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2731 CI->getArgOperand(2)); 2732 } else if (IsX86 && (Name.startswith("avx512.mask.andn.") || 2733 Name.startswith("avx512.mask.pandn."))) { 2734 VectorType *FTy = cast<VectorType>(CI->getType()); 2735 VectorType *ITy = VectorType::getInteger(FTy); 2736 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy)); 2737 Rep = Builder.CreateAnd(Rep, 2738 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2739 Rep = 
Builder.CreateBitCast(Rep, FTy); 2740 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2741 CI->getArgOperand(2)); 2742 } else if (IsX86 && (Name.startswith("avx512.mask.or.") || 2743 Name.startswith("avx512.mask.por."))) { 2744 VectorType *FTy = cast<VectorType>(CI->getType()); 2745 VectorType *ITy = VectorType::getInteger(FTy); 2746 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2747 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2748 Rep = Builder.CreateBitCast(Rep, FTy); 2749 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2750 CI->getArgOperand(2)); 2751 } else if (IsX86 && (Name.startswith("avx512.mask.xor.") || 2752 Name.startswith("avx512.mask.pxor."))) { 2753 VectorType *FTy = cast<VectorType>(CI->getType()); 2754 VectorType *ITy = VectorType::getInteger(FTy); 2755 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2756 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2757 Rep = Builder.CreateBitCast(Rep, FTy); 2758 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2759 CI->getArgOperand(2)); 2760 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) { 2761 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2762 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2763 CI->getArgOperand(2)); 2764 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) { 2765 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2766 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2767 CI->getArgOperand(2)); 2768 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) { 2769 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2770 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2771 CI->getArgOperand(2)); 2772 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) { 2773 if (Name.endswith(".512")) { 2774 Intrinsic::ID IID; 2775 if (Name[17] == 's') 2776 IID = Intrinsic::x86_avx512_add_ps_512; 2777 else 2778 IID = Intrinsic::x86_avx512_add_pd_512; 2779 2780 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2781 { CI->getArgOperand(0), CI->getArgOperand(1), 2782 CI->getArgOperand(4) }); 2783 } else { 2784 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2785 } 2786 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2787 CI->getArgOperand(2)); 2788 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) { 2789 if (Name.endswith(".512")) { 2790 Intrinsic::ID IID; 2791 if (Name[17] == 's') 2792 IID = Intrinsic::x86_avx512_div_ps_512; 2793 else 2794 IID = Intrinsic::x86_avx512_div_pd_512; 2795 2796 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2797 { CI->getArgOperand(0), CI->getArgOperand(1), 2798 CI->getArgOperand(4) }); 2799 } else { 2800 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1)); 2801 } 2802 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2803 CI->getArgOperand(2)); 2804 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) { 2805 if (Name.endswith(".512")) { 2806 Intrinsic::ID IID; 2807 if (Name[17] == 's') 2808 IID = Intrinsic::x86_avx512_mul_ps_512; 2809 else 2810 IID = Intrinsic::x86_avx512_mul_pd_512; 2811 2812 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2813 { CI->getArgOperand(0), CI->getArgOperand(1), 2814 CI->getArgOperand(4) }); 2815 } else { 2816 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2817 } 2818 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2819 
CI->getArgOperand(2)); 2820 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) { 2821 if (Name.endswith(".512")) { 2822 Intrinsic::ID IID; 2823 if (Name[17] == 's') 2824 IID = Intrinsic::x86_avx512_sub_ps_512; 2825 else 2826 IID = Intrinsic::x86_avx512_sub_pd_512; 2827 2828 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2829 { CI->getArgOperand(0), CI->getArgOperand(1), 2830 CI->getArgOperand(4) }); 2831 } else { 2832 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2833 } 2834 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2835 CI->getArgOperand(2)); 2836 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") || 2837 Name.startswith("avx512.mask.min.p")) && 2838 Name.drop_front(18) == ".512") { 2839 bool IsDouble = Name[17] == 'd'; 2840 bool IsMin = Name[13] == 'i'; 2841 static const Intrinsic::ID MinMaxTbl[2][2] = { 2842 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 }, 2843 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 } 2844 }; 2845 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble]; 2846 2847 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2848 { CI->getArgOperand(0), CI->getArgOperand(1), 2849 CI->getArgOperand(4) }); 2850 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2851 CI->getArgOperand(2)); 2852 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) { 2853 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 2854 Intrinsic::ctlz, 2855 CI->getType()), 2856 { CI->getArgOperand(0), Builder.getInt1(false) }); 2857 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2858 CI->getArgOperand(1)); 2859 } else if (IsX86 && Name.startswith("avx512.mask.psll")) { 2860 bool IsImmediate = Name[16] == 'i' || 2861 (Name.size() > 18 && Name[18] == 'i'); 2862 bool IsVariable = Name[16] == 'v'; 2863 char Size = Name[16] == '.' ? Name[17] : 2864 Name[17] == '.' ? Name[18] : 2865 Name[18] == '.' ? Name[19] : 2866 Name[20]; 2867 2868 Intrinsic::ID IID; 2869 if (IsVariable && Name[17] != '.') { 2870 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di 2871 IID = Intrinsic::x86_avx2_psllv_q; 2872 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di 2873 IID = Intrinsic::x86_avx2_psllv_q_256; 2874 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si 2875 IID = Intrinsic::x86_avx2_psllv_d; 2876 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si 2877 IID = Intrinsic::x86_avx2_psllv_d_256; 2878 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi 2879 IID = Intrinsic::x86_avx512_psllv_w_128; 2880 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi 2881 IID = Intrinsic::x86_avx512_psllv_w_256; 2882 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi 2883 IID = Intrinsic::x86_avx512_psllv_w_512; 2884 else 2885 llvm_unreachable("Unexpected size"); 2886 } else if (Name.endswith(".128")) { 2887 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128 2888 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d 2889 : Intrinsic::x86_sse2_psll_d; 2890 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128 2891 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q 2892 : Intrinsic::x86_sse2_psll_q; 2893 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128 2894 IID = IsImmediate ? 
Intrinsic::x86_sse2_pslli_w 2895 : Intrinsic::x86_sse2_psll_w; 2896 else 2897 llvm_unreachable("Unexpected size"); 2898 } else if (Name.endswith(".256")) { 2899 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256 2900 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d 2901 : Intrinsic::x86_avx2_psll_d; 2902 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256 2903 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q 2904 : Intrinsic::x86_avx2_psll_q; 2905 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256 2906 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w 2907 : Intrinsic::x86_avx2_psll_w; 2908 else 2909 llvm_unreachable("Unexpected size"); 2910 } else { 2911 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512 2912 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 : 2913 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 : 2914 Intrinsic::x86_avx512_psll_d_512; 2915 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512 2916 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 : 2917 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 : 2918 Intrinsic::x86_avx512_psll_q_512; 2919 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w 2920 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512 2921 : Intrinsic::x86_avx512_psll_w_512; 2922 else 2923 llvm_unreachable("Unexpected size"); 2924 } 2925 2926 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2927 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) { 2928 bool IsImmediate = Name[16] == 'i' || 2929 (Name.size() > 18 && Name[18] == 'i'); 2930 bool IsVariable = Name[16] == 'v'; 2931 char Size = Name[16] == '.' ? Name[17] : 2932 Name[17] == '.' ? Name[18] : 2933 Name[18] == '.' ? Name[19] : 2934 Name[20]; 2935 2936 Intrinsic::ID IID; 2937 if (IsVariable && Name[17] != '.') { 2938 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di 2939 IID = Intrinsic::x86_avx2_psrlv_q; 2940 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di 2941 IID = Intrinsic::x86_avx2_psrlv_q_256; 2942 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si 2943 IID = Intrinsic::x86_avx2_psrlv_d; 2944 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si 2945 IID = Intrinsic::x86_avx2_psrlv_d_256; 2946 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi 2947 IID = Intrinsic::x86_avx512_psrlv_w_128; 2948 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi 2949 IID = Intrinsic::x86_avx512_psrlv_w_256; 2950 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi 2951 IID = Intrinsic::x86_avx512_psrlv_w_512; 2952 else 2953 llvm_unreachable("Unexpected size"); 2954 } else if (Name.endswith(".128")) { 2955 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128 2956 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d 2957 : Intrinsic::x86_sse2_psrl_d; 2958 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128 2959 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q 2960 : Intrinsic::x86_sse2_psrl_q; 2961 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128 2962 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w 2963 : Intrinsic::x86_sse2_psrl_w; 2964 else 2965 llvm_unreachable("Unexpected size"); 2966 } else if (Name.endswith(".256")) { 2967 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256 2968 IID = IsImmediate ? 
Intrinsic::x86_avx2_psrli_d 2969 : Intrinsic::x86_avx2_psrl_d; 2970 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256 2971 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q 2972 : Intrinsic::x86_avx2_psrl_q; 2973 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256 2974 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w 2975 : Intrinsic::x86_avx2_psrl_w; 2976 else 2977 llvm_unreachable("Unexpected size"); 2978 } else { 2979 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512 2980 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 : 2981 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 : 2982 Intrinsic::x86_avx512_psrl_d_512; 2983 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512 2984 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 : 2985 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 : 2986 Intrinsic::x86_avx512_psrl_q_512; 2987 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w) 2988 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512 2989 : Intrinsic::x86_avx512_psrl_w_512; 2990 else 2991 llvm_unreachable("Unexpected size"); 2992 } 2993 2994 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2995 } else if (IsX86 && Name.startswith("avx512.mask.psra")) { 2996 bool IsImmediate = Name[16] == 'i' || 2997 (Name.size() > 18 && Name[18] == 'i'); 2998 bool IsVariable = Name[16] == 'v'; 2999 char Size = Name[16] == '.' ? Name[17] : 3000 Name[17] == '.' ? Name[18] : 3001 Name[18] == '.' ? Name[19] : 3002 Name[20]; 3003 3004 Intrinsic::ID IID; 3005 if (IsVariable && Name[17] != '.') { 3006 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si 3007 IID = Intrinsic::x86_avx2_psrav_d; 3008 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si 3009 IID = Intrinsic::x86_avx2_psrav_d_256; 3010 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi 3011 IID = Intrinsic::x86_avx512_psrav_w_128; 3012 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi 3013 IID = Intrinsic::x86_avx512_psrav_w_256; 3014 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi 3015 IID = Intrinsic::x86_avx512_psrav_w_512; 3016 else 3017 llvm_unreachable("Unexpected size"); 3018 } else if (Name.endswith(".128")) { 3019 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128 3020 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d 3021 : Intrinsic::x86_sse2_psra_d; 3022 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128 3023 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 : 3024 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 : 3025 Intrinsic::x86_avx512_psra_q_128; 3026 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128 3027 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w 3028 : Intrinsic::x86_sse2_psra_w; 3029 else 3030 llvm_unreachable("Unexpected size"); 3031 } else if (Name.endswith(".256")) { 3032 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256 3033 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d 3034 : Intrinsic::x86_avx2_psra_d; 3035 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256 3036 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 : 3037 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 : 3038 Intrinsic::x86_avx512_psra_q_256; 3039 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256 3040 IID = IsImmediate ? 
Intrinsic::x86_avx2_psrai_w 3041 : Intrinsic::x86_avx2_psra_w; 3042 else 3043 llvm_unreachable("Unexpected size"); 3044 } else { 3045 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512 3046 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 : 3047 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 : 3048 Intrinsic::x86_avx512_psra_d_512; 3049 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q 3050 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 : 3051 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 : 3052 Intrinsic::x86_avx512_psra_q_512; 3053 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w 3054 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512 3055 : Intrinsic::x86_avx512_psra_w_512; 3056 else 3057 llvm_unreachable("Unexpected size"); 3058 } 3059 3060 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 3061 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) { 3062 Rep = upgradeMaskedMove(Builder, *CI); 3063 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) { 3064 Rep = UpgradeMaskToInt(Builder, *CI); 3065 } else if (IsX86 && Name.endswith(".movntdqa")) { 3066 Module *M = F->getParent(); 3067 MDNode *Node = MDNode::get( 3068 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 3069 3070 Value *Ptr = CI->getArgOperand(0); 3071 VectorType *VTy = cast<VectorType>(CI->getType()); 3072 3073 // Convert the type of the pointer to a pointer to the stored type. 3074 Value *BC = 3075 Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast"); 3076 LoadInst *LI = 3077 Builder.CreateAlignedLoad(VTy, BC, Align(VTy->getBitWidth() / 8)); 3078 LI->setMetadata(M->getMDKindID("nontemporal"), Node); 3079 Rep = LI; 3080 } else if (IsX86 && (Name.startswith("fma.vfmadd.") || 3081 Name.startswith("fma.vfmsub.") || 3082 Name.startswith("fma.vfnmadd.") || 3083 Name.startswith("fma.vfnmsub."))) { 3084 bool NegMul = Name[6] == 'n'; 3085 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's'; 3086 bool IsScalar = NegMul ? 
Name[12] == 's' : Name[11] == 's'; 3087 3088 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3089 CI->getArgOperand(2) }; 3090 3091 if (IsScalar) { 3092 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 3093 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 3094 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 3095 } 3096 3097 if (NegMul && !IsScalar) 3098 Ops[0] = Builder.CreateFNeg(Ops[0]); 3099 if (NegMul && IsScalar) 3100 Ops[1] = Builder.CreateFNeg(Ops[1]); 3101 if (NegAcc) 3102 Ops[2] = Builder.CreateFNeg(Ops[2]); 3103 3104 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 3105 Intrinsic::fma, 3106 Ops[0]->getType()), 3107 Ops); 3108 3109 if (IsScalar) 3110 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, 3111 (uint64_t)0); 3112 } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) { 3113 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3114 CI->getArgOperand(2) }; 3115 3116 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 3117 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 3118 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 3119 3120 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 3121 Intrinsic::fma, 3122 Ops[0]->getType()), 3123 Ops); 3124 3125 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()), 3126 Rep, (uint64_t)0); 3127 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") || 3128 Name.startswith("avx512.maskz.vfmadd.s") || 3129 Name.startswith("avx512.mask3.vfmadd.s") || 3130 Name.startswith("avx512.mask3.vfmsub.s") || 3131 Name.startswith("avx512.mask3.vfnmsub.s"))) { 3132 bool IsMask3 = Name[11] == '3'; 3133 bool IsMaskZ = Name[11] == 'z'; 3134 // Drop the "avx512.mask." to make it easier. 3135 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 3136 bool NegMul = Name[2] == 'n'; 3137 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's'; 3138 3139 Value *A = CI->getArgOperand(0); 3140 Value *B = CI->getArgOperand(1); 3141 Value *C = CI->getArgOperand(2); 3142 3143 if (NegMul && (IsMask3 || IsMaskZ)) 3144 A = Builder.CreateFNeg(A); 3145 if (NegMul && !(IsMask3 || IsMaskZ)) 3146 B = Builder.CreateFNeg(B); 3147 if (NegAcc) 3148 C = Builder.CreateFNeg(C); 3149 3150 A = Builder.CreateExtractElement(A, (uint64_t)0); 3151 B = Builder.CreateExtractElement(B, (uint64_t)0); 3152 C = Builder.CreateExtractElement(C, (uint64_t)0); 3153 3154 if (!isa<ConstantInt>(CI->getArgOperand(4)) || 3155 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) { 3156 Value *Ops[] = { A, B, C, CI->getArgOperand(4) }; 3157 3158 Intrinsic::ID IID; 3159 if (Name.back() == 'd') 3160 IID = Intrinsic::x86_avx512_vfmadd_f64; 3161 else 3162 IID = Intrinsic::x86_avx512_vfmadd_f32; 3163 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID); 3164 Rep = Builder.CreateCall(FMA, Ops); 3165 } else { 3166 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 3167 Intrinsic::fma, 3168 A->getType()); 3169 Rep = Builder.CreateCall(FMA, { A, B, C }); 3170 } 3171 3172 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) : 3173 IsMask3 ? C : A; 3174 3175 // For Mask3 with NegAcc, we need to create a new extractelement that 3176 // avoids the negation above. 
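// (That is, for the mask3 forms the passthru is the accumulator operand,
// and C was negated above for the subtract variants, so re-extract element
// 0 from the original call operand rather than reusing the negated value.)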
3177 if (NegAcc && IsMask3) 3178 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2), 3179 (uint64_t)0); 3180 3181 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3), 3182 Rep, PassThru); 3183 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0), 3184 Rep, (uint64_t)0); 3185 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") || 3186 Name.startswith("avx512.mask.vfnmadd.p") || 3187 Name.startswith("avx512.mask.vfnmsub.p") || 3188 Name.startswith("avx512.mask3.vfmadd.p") || 3189 Name.startswith("avx512.mask3.vfmsub.p") || 3190 Name.startswith("avx512.mask3.vfnmsub.p") || 3191 Name.startswith("avx512.maskz.vfmadd.p"))) { 3192 bool IsMask3 = Name[11] == '3'; 3193 bool IsMaskZ = Name[11] == 'z'; 3194 // Drop the "avx512.mask." to make it easier. 3195 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 3196 bool NegMul = Name[2] == 'n'; 3197 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's'; 3198 3199 Value *A = CI->getArgOperand(0); 3200 Value *B = CI->getArgOperand(1); 3201 Value *C = CI->getArgOperand(2); 3202 3203 if (NegMul && (IsMask3 || IsMaskZ)) 3204 A = Builder.CreateFNeg(A); 3205 if (NegMul && !(IsMask3 || IsMaskZ)) 3206 B = Builder.CreateFNeg(B); 3207 if (NegAcc) 3208 C = Builder.CreateFNeg(C); 3209 3210 if (CI->getNumArgOperands() == 5 && 3211 (!isa<ConstantInt>(CI->getArgOperand(4)) || 3212 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) { 3213 Intrinsic::ID IID; 3214 // Check the character before ".512" in string. 3215 if (Name[Name.size()-5] == 's') 3216 IID = Intrinsic::x86_avx512_vfmadd_ps_512; 3217 else 3218 IID = Intrinsic::x86_avx512_vfmadd_pd_512; 3219 3220 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 3221 { A, B, C, CI->getArgOperand(4) }); 3222 } else { 3223 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 3224 Intrinsic::fma, 3225 A->getType()); 3226 Rep = Builder.CreateCall(FMA, { A, B, C }); 3227 } 3228 3229 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) : 3230 IsMask3 ? CI->getArgOperand(2) : 3231 CI->getArgOperand(0); 3232 3233 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3234 } else if (IsX86 && Name.startswith("fma.vfmsubadd.p")) { 3235 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3236 unsigned EltWidth = CI->getType()->getScalarSizeInBits(); 3237 Intrinsic::ID IID; 3238 if (VecWidth == 128 && EltWidth == 32) 3239 IID = Intrinsic::x86_fma_vfmaddsub_ps; 3240 else if (VecWidth == 256 && EltWidth == 32) 3241 IID = Intrinsic::x86_fma_vfmaddsub_ps_256; 3242 else if (VecWidth == 128 && EltWidth == 64) 3243 IID = Intrinsic::x86_fma_vfmaddsub_pd; 3244 else if (VecWidth == 256 && EltWidth == 64) 3245 IID = Intrinsic::x86_fma_vfmaddsub_pd_256; 3246 else 3247 llvm_unreachable("Unexpected intrinsic"); 3248 3249 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3250 CI->getArgOperand(2) }; 3251 Ops[2] = Builder.CreateFNeg(Ops[2]); 3252 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 3253 Ops); 3254 } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") || 3255 Name.startswith("avx512.mask3.vfmaddsub.p") || 3256 Name.startswith("avx512.maskz.vfmaddsub.p") || 3257 Name.startswith("avx512.mask3.vfmsubadd.p"))) { 3258 bool IsMask3 = Name[11] == '3'; 3259 bool IsMaskZ = Name[11] == 'z'; 3260 // Drop the "avx512.mask." to make it easier. 3261 Name = Name.drop_front(IsMask3 || IsMaskZ ? 
13 : 12); 3262 bool IsSubAdd = Name[3] == 's'; 3263 if (CI->getNumArgOperands() == 5) { 3264 Intrinsic::ID IID; 3265 // Check the character before ".512" in string. 3266 if (Name[Name.size()-5] == 's') 3267 IID = Intrinsic::x86_avx512_vfmaddsub_ps_512; 3268 else 3269 IID = Intrinsic::x86_avx512_vfmaddsub_pd_512; 3270 3271 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3272 CI->getArgOperand(2), CI->getArgOperand(4) }; 3273 if (IsSubAdd) 3274 Ops[2] = Builder.CreateFNeg(Ops[2]); 3275 3276 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 3277 Ops); 3278 } else { 3279 int NumElts = CI->getType()->getVectorNumElements(); 3280 3281 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3282 CI->getArgOperand(2) }; 3283 3284 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma, 3285 Ops[0]->getType()); 3286 Value *Odd = Builder.CreateCall(FMA, Ops); 3287 Ops[2] = Builder.CreateFNeg(Ops[2]); 3288 Value *Even = Builder.CreateCall(FMA, Ops); 3289 3290 if (IsSubAdd) 3291 std::swap(Even, Odd); 3292 3293 SmallVector<uint32_t, 32> Idxs(NumElts); 3294 for (int i = 0; i != NumElts; ++i) 3295 Idxs[i] = i + (i % 2) * NumElts; 3296 3297 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs); 3298 } 3299 3300 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) : 3301 IsMask3 ? CI->getArgOperand(2) : 3302 CI->getArgOperand(0); 3303 3304 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3305 } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") || 3306 Name.startswith("avx512.maskz.pternlog."))) { 3307 bool ZeroMask = Name[11] == 'z'; 3308 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3309 unsigned EltWidth = CI->getType()->getScalarSizeInBits(); 3310 Intrinsic::ID IID; 3311 if (VecWidth == 128 && EltWidth == 32) 3312 IID = Intrinsic::x86_avx512_pternlog_d_128; 3313 else if (VecWidth == 256 && EltWidth == 32) 3314 IID = Intrinsic::x86_avx512_pternlog_d_256; 3315 else if (VecWidth == 512 && EltWidth == 32) 3316 IID = Intrinsic::x86_avx512_pternlog_d_512; 3317 else if (VecWidth == 128 && EltWidth == 64) 3318 IID = Intrinsic::x86_avx512_pternlog_q_128; 3319 else if (VecWidth == 256 && EltWidth == 64) 3320 IID = Intrinsic::x86_avx512_pternlog_q_256; 3321 else if (VecWidth == 512 && EltWidth == 64) 3322 IID = Intrinsic::x86_avx512_pternlog_q_512; 3323 else 3324 llvm_unreachable("Unexpected intrinsic"); 3325 3326 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1), 3327 CI->getArgOperand(2), CI->getArgOperand(3) }; 3328 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3329 Args); 3330 Value *PassThru = ZeroMask ? 
ConstantAggregateZero::get(CI->getType()) 3331 : CI->getArgOperand(0); 3332 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru); 3333 } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") || 3334 Name.startswith("avx512.maskz.vpmadd52"))) { 3335 bool ZeroMask = Name[11] == 'z'; 3336 bool High = Name[20] == 'h' || Name[21] == 'h'; 3337 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3338 Intrinsic::ID IID; 3339 if (VecWidth == 128 && !High) 3340 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128; 3341 else if (VecWidth == 256 && !High) 3342 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256; 3343 else if (VecWidth == 512 && !High) 3344 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512; 3345 else if (VecWidth == 128 && High) 3346 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128; 3347 else if (VecWidth == 256 && High) 3348 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256; 3349 else if (VecWidth == 512 && High) 3350 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512; 3351 else 3352 llvm_unreachable("Unexpected intrinsic"); 3353 3354 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1), 3355 CI->getArgOperand(2) }; 3356 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3357 Args); 3358 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3359 : CI->getArgOperand(0); 3360 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3361 } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") || 3362 Name.startswith("avx512.mask.vpermt2var.") || 3363 Name.startswith("avx512.maskz.vpermt2var."))) { 3364 bool ZeroMask = Name[11] == 'z'; 3365 bool IndexForm = Name[17] == 'i'; 3366 Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm); 3367 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") || 3368 Name.startswith("avx512.maskz.vpdpbusd.") || 3369 Name.startswith("avx512.mask.vpdpbusds.") || 3370 Name.startswith("avx512.maskz.vpdpbusds."))) { 3371 bool ZeroMask = Name[11] == 'z'; 3372 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's'; 3373 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3374 Intrinsic::ID IID; 3375 if (VecWidth == 128 && !IsSaturating) 3376 IID = Intrinsic::x86_avx512_vpdpbusd_128; 3377 else if (VecWidth == 256 && !IsSaturating) 3378 IID = Intrinsic::x86_avx512_vpdpbusd_256; 3379 else if (VecWidth == 512 && !IsSaturating) 3380 IID = Intrinsic::x86_avx512_vpdpbusd_512; 3381 else if (VecWidth == 128 && IsSaturating) 3382 IID = Intrinsic::x86_avx512_vpdpbusds_128; 3383 else if (VecWidth == 256 && IsSaturating) 3384 IID = Intrinsic::x86_avx512_vpdpbusds_256; 3385 else if (VecWidth == 512 && IsSaturating) 3386 IID = Intrinsic::x86_avx512_vpdpbusds_512; 3387 else 3388 llvm_unreachable("Unexpected intrinsic"); 3389 3390 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3391 CI->getArgOperand(2) }; 3392 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3393 Args); 3394 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3395 : CI->getArgOperand(0); 3396 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3397 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") || 3398 Name.startswith("avx512.maskz.vpdpwssd.") || 3399 Name.startswith("avx512.mask.vpdpwssds.") || 3400 Name.startswith("avx512.maskz.vpdpwssds."))) { 3401 bool ZeroMask = Name[11] == 'z'; 3402 bool IsSaturating = Name[ZeroMask ? 
21 : 20] == 's'; 3403 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3404 Intrinsic::ID IID; 3405 if (VecWidth == 128 && !IsSaturating) 3406 IID = Intrinsic::x86_avx512_vpdpwssd_128; 3407 else if (VecWidth == 256 && !IsSaturating) 3408 IID = Intrinsic::x86_avx512_vpdpwssd_256; 3409 else if (VecWidth == 512 && !IsSaturating) 3410 IID = Intrinsic::x86_avx512_vpdpwssd_512; 3411 else if (VecWidth == 128 && IsSaturating) 3412 IID = Intrinsic::x86_avx512_vpdpwssds_128; 3413 else if (VecWidth == 256 && IsSaturating) 3414 IID = Intrinsic::x86_avx512_vpdpwssds_256; 3415 else if (VecWidth == 512 && IsSaturating) 3416 IID = Intrinsic::x86_avx512_vpdpwssds_512; 3417 else 3418 llvm_unreachable("Unexpected intrinsic"); 3419 3420 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3421 CI->getArgOperand(2) }; 3422 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3423 Args); 3424 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3425 : CI->getArgOperand(0); 3426 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3427 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" || 3428 Name == "addcarry.u32" || Name == "addcarry.u64" || 3429 Name == "subborrow.u32" || Name == "subborrow.u64")) { 3430 Intrinsic::ID IID; 3431 if (Name[0] == 'a' && Name.back() == '2') 3432 IID = Intrinsic::x86_addcarry_32; 3433 else if (Name[0] == 'a' && Name.back() == '4') 3434 IID = Intrinsic::x86_addcarry_64; 3435 else if (Name[0] == 's' && Name.back() == '2') 3436 IID = Intrinsic::x86_subborrow_32; 3437 else if (Name[0] == 's' && Name.back() == '4') 3438 IID = Intrinsic::x86_subborrow_64; 3439 else 3440 llvm_unreachable("Unexpected intrinsic"); 3441 3442 // Make a call with 3 operands. 3443 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3444 CI->getArgOperand(2)}; 3445 Value *NewCall = Builder.CreateCall( 3446 Intrinsic::getDeclaration(CI->getModule(), IID), 3447 Args); 3448 3449 // Extract the second result and store it. 3450 Value *Data = Builder.CreateExtractValue(NewCall, 1); 3451 // Cast the pointer to the right type. 3452 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3), 3453 llvm::PointerType::getUnqual(Data->getType())); 3454 Builder.CreateAlignedStore(Data, Ptr, Align(1)); 3455 // Replace the original call result with the first result of the new call. 3456 Value *CF = Builder.CreateExtractValue(NewCall, 0); 3457 3458 CI->replaceAllUsesWith(CF); 3459 Rep = nullptr; 3460 } else if (IsX86 && Name.startswith("avx512.mask.") && 3461 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) { 3462 // Rep will be updated by the call in the condition. 
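// (Broadly, upgradeAVX512MaskToSelect is expected to rewrite these
// remaining avx512.mask.* intrinsics as the equivalent unmasked operation
// followed by a select on the mask operand, mirroring the EmitX86Select
// pattern used throughout this function.)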
3463 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) { 3464 Value *Arg = CI->getArgOperand(0); 3465 Value *Neg = Builder.CreateNeg(Arg, "neg"); 3466 Value *Cmp = Builder.CreateICmpSGE( 3467 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond"); 3468 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs"); 3469 } else if (IsNVVM && (Name.startswith("atomic.load.add.f32.p") || 3470 Name.startswith("atomic.load.add.f64.p"))) { 3471 Value *Ptr = CI->getArgOperand(0); 3472 Value *Val = CI->getArgOperand(1); 3473 Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, 3474 AtomicOrdering::SequentiallyConsistent); 3475 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" || 3476 Name == "max.ui" || Name == "max.ull")) { 3477 Value *Arg0 = CI->getArgOperand(0); 3478 Value *Arg1 = CI->getArgOperand(1); 3479 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 3480 ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond") 3481 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond"); 3482 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max"); 3483 } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" || 3484 Name == "min.ui" || Name == "min.ull")) { 3485 Value *Arg0 = CI->getArgOperand(0); 3486 Value *Arg1 = CI->getArgOperand(1); 3487 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 3488 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond") 3489 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond"); 3490 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min"); 3491 } else if (IsNVVM && Name == "clz.ll") { 3492 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64. 3493 Value *Arg = CI->getArgOperand(0); 3494 Value *Ctlz = Builder.CreateCall( 3495 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, 3496 {Arg->getType()}), 3497 {Arg, Builder.getFalse()}, "ctlz"); 3498 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc"); 3499 } else if (IsNVVM && Name == "popc.ll") { 3500 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an 3501 // i64.
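// For example (sketch, value names are illustrative):
//   %r = call i32 @llvm.nvvm.popc.ll(i64 %x)
// becomes roughly
//   %c = call i64 @llvm.ctpop.i64(i64 %x)
//   %r = trunc i64 %c to i32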
3502 Value *Arg = CI->getArgOperand(0); 3503 Value *Popc = Builder.CreateCall( 3504 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop, 3505 {Arg->getType()}), 3506 Arg, "ctpop"); 3507 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc"); 3508 } else if (IsNVVM && Name == "h2f") { 3509 Rep = Builder.CreateCall(Intrinsic::getDeclaration( 3510 F->getParent(), Intrinsic::convert_from_fp16, 3511 {Builder.getFloatTy()}), 3512 CI->getArgOperand(0), "h2f"); 3513 } else { 3514 llvm_unreachable("Unknown function for CallInst upgrade."); 3515 } 3516 3517 if (Rep) 3518 CI->replaceAllUsesWith(Rep); 3519 CI->eraseFromParent(); 3520 return; 3521 } 3522 3523 const auto &DefaultCase = [&NewFn, &CI]() -> void { 3524 // Handle generic mangling change, but nothing else 3525 assert( 3526 (CI->getCalledFunction()->getName() != NewFn->getName()) && 3527 "Unknown function for CallInst upgrade and isn't just a name change"); 3528 CI->setCalledFunction(NewFn); 3529 }; 3530 CallInst *NewCall = nullptr; 3531 switch (NewFn->getIntrinsicID()) { 3532 default: { 3533 DefaultCase(); 3534 return; 3535 } 3536 case Intrinsic::experimental_vector_reduce_v2_fmul: { 3537 SmallVector<Value *, 2> Args; 3538 if (CI->isFast()) 3539 Args.push_back(ConstantFP::get(CI->getOperand(0)->getType(), 1.0)); 3540 else 3541 Args.push_back(CI->getOperand(0)); 3542 Args.push_back(CI->getOperand(1)); 3543 NewCall = Builder.CreateCall(NewFn, Args); 3544 cast<Instruction>(NewCall)->copyFastMathFlags(CI); 3545 break; 3546 } 3547 case Intrinsic::experimental_vector_reduce_v2_fadd: { 3548 SmallVector<Value *, 2> Args; 3549 if (CI->isFast()) 3550 Args.push_back(Constant::getNullValue(CI->getOperand(0)->getType())); 3551 else 3552 Args.push_back(CI->getOperand(0)); 3553 Args.push_back(CI->getOperand(1)); 3554 NewCall = Builder.CreateCall(NewFn, Args); 3555 cast<Instruction>(NewCall)->copyFastMathFlags(CI); 3556 break; 3557 } 3558 case Intrinsic::arm_neon_vld1: 3559 case Intrinsic::arm_neon_vld2: 3560 case Intrinsic::arm_neon_vld3: 3561 case Intrinsic::arm_neon_vld4: 3562 case Intrinsic::arm_neon_vld2lane: 3563 case Intrinsic::arm_neon_vld3lane: 3564 case Intrinsic::arm_neon_vld4lane: 3565 case Intrinsic::arm_neon_vst1: 3566 case Intrinsic::arm_neon_vst2: 3567 case Intrinsic::arm_neon_vst3: 3568 case Intrinsic::arm_neon_vst4: 3569 case Intrinsic::arm_neon_vst2lane: 3570 case Intrinsic::arm_neon_vst3lane: 3571 case Intrinsic::arm_neon_vst4lane: { 3572 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3573 CI->arg_operands().end()); 3574 NewCall = Builder.CreateCall(NewFn, Args); 3575 break; 3576 } 3577 3578 case Intrinsic::bitreverse: 3579 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3580 break; 3581 3582 case Intrinsic::ctlz: 3583 case Intrinsic::cttz: 3584 assert(CI->getNumArgOperands() == 1 && 3585 "Mismatch between function args and call args"); 3586 NewCall = 3587 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()}); 3588 break; 3589 3590 case Intrinsic::objectsize: { 3591 Value *NullIsUnknownSize = CI->getNumArgOperands() == 2 3592 ? Builder.getFalse() 3593 : CI->getArgOperand(2); 3594 Value *Dynamic = 3595 CI->getNumArgOperands() < 4 ? 
Builder.getFalse() : CI->getArgOperand(3); 3596 NewCall = Builder.CreateCall( 3597 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic}); 3598 break; 3599 } 3600 3601 case Intrinsic::ctpop: 3602 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3603 break; 3604 3605 case Intrinsic::convert_from_fp16: 3606 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3607 break; 3608 3609 case Intrinsic::dbg_value: 3610 // Upgrade from the old version that had an extra offset argument. 3611 assert(CI->getNumArgOperands() == 4); 3612 // Drop nonzero offsets instead of attempting to upgrade them. 3613 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1))) 3614 if (Offset->isZeroValue()) { 3615 NewCall = Builder.CreateCall( 3616 NewFn, 3617 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)}); 3618 break; 3619 } 3620 CI->eraseFromParent(); 3621 return; 3622 3623 case Intrinsic::x86_xop_vfrcz_ss: 3624 case Intrinsic::x86_xop_vfrcz_sd: 3625 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)}); 3626 break; 3627 3628 case Intrinsic::x86_xop_vpermil2pd: 3629 case Intrinsic::x86_xop_vpermil2ps: 3630 case Intrinsic::x86_xop_vpermil2pd_256: 3631 case Intrinsic::x86_xop_vpermil2ps_256: { 3632 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3633 CI->arg_operands().end()); 3634 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType()); 3635 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy); 3636 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy); 3637 NewCall = Builder.CreateCall(NewFn, Args); 3638 break; 3639 } 3640 3641 case Intrinsic::x86_sse41_ptestc: 3642 case Intrinsic::x86_sse41_ptestz: 3643 case Intrinsic::x86_sse41_ptestnzc: { 3644 // The arguments for these intrinsics used to be v4f32, and changed 3645 // to v2i64. This is purely a nop, since those are bitwise intrinsics. 3646 // So, the only thing required is a bitcast for both arguments. 3647 // First, check that the arguments have the old type. 3648 Value *Arg0 = CI->getArgOperand(0); 3649 if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4)) 3650 return; 3651 3652 // Old intrinsic, add bitcasts 3653 Value *Arg1 = CI->getArgOperand(1); 3654 3655 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 3656 3657 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast"); 3658 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 3659 3660 NewCall = Builder.CreateCall(NewFn, {BC0, BC1}); 3661 break; 3662 } 3663 3664 case Intrinsic::x86_rdtscp: { 3665 // This used to take one argument. If we have no arguments, it is already 3666 // upgraded. 3667 if (CI->getNumOperands() == 0) 3668 return; 3669 3670 NewCall = Builder.CreateCall(NewFn); 3671 // Extract the second result and store it. 3672 Value *Data = Builder.CreateExtractValue(NewCall, 1); 3673 // Cast the pointer to the right type. 3674 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0), 3675 llvm::PointerType::getUnqual(Data->getType())); 3676 Builder.CreateAlignedStore(Data, Ptr, Align(1)); 3677 // Replace the original call result with the first result of the new call.
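// (Sketch of the overall rewrite: the old form took a pointer operand,
// e.g. %tsc = call i64 @llvm.x86.rdtscp(i8* %aux), while the new intrinsic
// takes no arguments and returns { i64, i32 }; the i32 aux value is stored
// through the old pointer operand and the i64 TSC value replaces the
// original result.)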
3678 Value *TSC = Builder.CreateExtractValue(NewCall, 0); 3679 3680 std::string Name = std::string(CI->getName()); 3681 if (!Name.empty()) { 3682 CI->setName(Name + ".old"); 3683 NewCall->setName(Name); 3684 } 3685 CI->replaceAllUsesWith(TSC); 3686 CI->eraseFromParent(); 3687 return; 3688 } 3689 3690 case Intrinsic::x86_sse41_insertps: 3691 case Intrinsic::x86_sse41_dppd: 3692 case Intrinsic::x86_sse41_dpps: 3693 case Intrinsic::x86_sse41_mpsadbw: 3694 case Intrinsic::x86_avx_dp_ps_256: 3695 case Intrinsic::x86_avx2_mpsadbw: { 3696 // Need to truncate the last argument from i32 to i8 -- this argument models 3697 // an inherently 8-bit immediate operand to these x86 instructions. 3698 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3699 CI->arg_operands().end()); 3700 3701 // Replace the last argument with a trunc. 3702 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc"); 3703 NewCall = Builder.CreateCall(NewFn, Args); 3704 break; 3705 } 3706 3707 case Intrinsic::thread_pointer: { 3708 NewCall = Builder.CreateCall(NewFn, {}); 3709 break; 3710 } 3711 3712 case Intrinsic::invariant_start: 3713 case Intrinsic::invariant_end: 3714 case Intrinsic::masked_load: 3715 case Intrinsic::masked_store: 3716 case Intrinsic::masked_gather: 3717 case Intrinsic::masked_scatter: { 3718 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3719 CI->arg_operands().end()); 3720 NewCall = Builder.CreateCall(NewFn, Args); 3721 break; 3722 } 3723 3724 case Intrinsic::memcpy: 3725 case Intrinsic::memmove: 3726 case Intrinsic::memset: { 3727 // We have to make sure that the call signature is what we're expecting. 3728 // We only want to change the old signatures by removing the alignment arg: 3729 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i32, i1) 3730 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i1) 3731 // @llvm.memset...(i8*, i8, i[32|64], i32, i1) 3732 // -> @llvm.memset...(i8*, i8, i[32|64], i1) 3733 // Note: i8*'s in the above can be any pointer type 3734 if (CI->getNumArgOperands() != 5) { 3735 DefaultCase(); 3736 return; 3737 } 3738 // Remove alignment argument (3), and add alignment attributes to the 3739 // dest/src pointers. 3740 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1), 3741 CI->getArgOperand(2), CI->getArgOperand(4)}; 3742 NewCall = Builder.CreateCall(NewFn, Args); 3743 auto *MemCI = cast<MemIntrinsic>(NewCall); 3744 // All mem intrinsics support dest alignment. 3745 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3)); 3746 MemCI->setDestAlignment(Align->getZExtValue()); 3747 // Memcpy/Memmove also support source alignment. 3748 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI)) 3749 MTI->setSourceAlignment(Align->getZExtValue()); 3750 break; 3751 } 3752 } 3753 assert(NewCall && "Should have either set this variable or returned through " 3754 "the default case"); 3755 std::string Name = std::string(CI->getName()); 3756 if (!Name.empty()) { 3757 CI->setName(Name + ".old"); 3758 NewCall->setName(Name); 3759 } 3760 CI->replaceAllUsesWith(NewCall); 3761 CI->eraseFromParent(); 3762 } 3763 3764 void llvm::UpgradeCallsToIntrinsic(Function *F) { 3765 assert(F && "Illegal attempt to upgrade a non-existent intrinsic."); 3766 3767 // Check if this function should be upgraded and get the replacement function 3768 // if there is one. 3769 Function *NewFn; 3770 if (UpgradeIntrinsicFunction(F, NewFn)) { 3771 // Replace all users of the old function with the new function or new 3772 // instructions. 
This is not a range loop because the call is deleted. 3773 for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; ) 3774 if (CallInst *CI = dyn_cast<CallInst>(*UI++)) 3775 UpgradeIntrinsicCall(CI, NewFn); 3776 3777 // Remove old function, no longer used, from the module. 3778 F->eraseFromParent(); 3779 } 3780 } 3781 3782 MDNode *llvm::UpgradeTBAANode(MDNode &MD) { 3783 // Check if the tag uses struct-path aware TBAA format. 3784 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3) 3785 return &MD; 3786 3787 auto &Context = MD.getContext(); 3788 if (MD.getNumOperands() == 3) { 3789 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)}; 3790 MDNode *ScalarType = MDNode::get(Context, Elts); 3791 // Create a MDNode <ScalarType, ScalarType, offset 0, const> 3792 Metadata *Elts2[] = {ScalarType, ScalarType, 3793 ConstantAsMetadata::get( 3794 Constant::getNullValue(Type::getInt64Ty(Context))), 3795 MD.getOperand(2)}; 3796 return MDNode::get(Context, Elts2); 3797 } 3798 // Create a MDNode <MD, MD, offset 0> 3799 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue( 3800 Type::getInt64Ty(Context)))}; 3801 return MDNode::get(Context, Elts); 3802 } 3803 3804 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy, 3805 Instruction *&Temp) { 3806 if (Opc != Instruction::BitCast) 3807 return nullptr; 3808 3809 Temp = nullptr; 3810 Type *SrcTy = V->getType(); 3811 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3812 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3813 LLVMContext &Context = V->getContext(); 3814 3815 // We have no information about target data layout, so we assume that 3816 // the maximum pointer size is 64bit. 3817 Type *MidTy = Type::getInt64Ty(Context); 3818 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy); 3819 3820 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy); 3821 } 3822 3823 return nullptr; 3824 } 3825 3826 Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) { 3827 if (Opc != Instruction::BitCast) 3828 return nullptr; 3829 3830 Type *SrcTy = C->getType(); 3831 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3832 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3833 LLVMContext &Context = C->getContext(); 3834 3835 // We have no information about target data layout, so we assume that 3836 // the maximum pointer size is 64bit. 3837 Type *MidTy = Type::getInt64Ty(Context); 3838 3839 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy), 3840 DestTy); 3841 } 3842 3843 return nullptr; 3844 } 3845 3846 /// Check the debug info version number, if it is out-dated, drop the debug 3847 /// info. Return true if module is modified. 3848 bool llvm::UpgradeDebugInfo(Module &M) { 3849 unsigned Version = getDebugMetadataVersionFromModule(M); 3850 if (Version == DEBUG_METADATA_VERSION) { 3851 bool BrokenDebugInfo = false; 3852 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo)) 3853 report_fatal_error("Broken module found, compilation aborted!"); 3854 if (!BrokenDebugInfo) 3855 // Everything is ok. 3856 return false; 3857 else { 3858 // Diagnose malformed debug info. 3859 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M); 3860 M.getContext().diagnose(Diag); 3861 } 3862 } 3863 bool Modified = StripDebugInfo(M); 3864 if (Modified && Version != DEBUG_METADATA_VERSION) { 3865 // Diagnose a version mismatch. 
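// (The version is read from the "Debug Info Version" module flag, which in
// current IR typically looks like !{i32 2, !"Debug Info Version", i32 3}.)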
3866 DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version); 3867 M.getContext().diagnose(DiagVersion); 3868 } 3869 return Modified; 3870 } 3871 3872 /// This checks for objc retain release marker which should be upgraded. It 3873 /// returns true if module is modified. 3874 static bool UpgradeRetainReleaseMarker(Module &M) { 3875 bool Changed = false; 3876 const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker"; 3877 NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey); 3878 if (ModRetainReleaseMarker) { 3879 MDNode *Op = ModRetainReleaseMarker->getOperand(0); 3880 if (Op) { 3881 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0)); 3882 if (ID) { 3883 SmallVector<StringRef, 4> ValueComp; 3884 ID->getString().split(ValueComp, "#"); 3885 if (ValueComp.size() == 2) { 3886 std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str(); 3887 ID = MDString::get(M.getContext(), NewValue); 3888 } 3889 M.addModuleFlag(Module::Error, MarkerKey, ID); 3890 M.eraseNamedMetadata(ModRetainReleaseMarker); 3891 Changed = true; 3892 } 3893 } 3894 } 3895 return Changed; 3896 } 3897 3898 void llvm::UpgradeARCRuntime(Module &M) { 3899 // This lambda converts normal function calls to ARC runtime functions to 3900 // intrinsic calls. 3901 auto UpgradeToIntrinsic = [&](const char *OldFunc, 3902 llvm::Intrinsic::ID IntrinsicFunc) { 3903 Function *Fn = M.getFunction(OldFunc); 3904 3905 if (!Fn) 3906 return; 3907 3908 Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc); 3909 3910 for (auto I = Fn->user_begin(), E = Fn->user_end(); I != E;) { 3911 CallInst *CI = dyn_cast<CallInst>(*I++); 3912 if (!CI || CI->getCalledFunction() != Fn) 3913 continue; 3914 3915 IRBuilder<> Builder(CI->getParent(), CI->getIterator()); 3916 FunctionType *NewFuncTy = NewFn->getFunctionType(); 3917 SmallVector<Value *, 2> Args; 3918 3919 // Don't upgrade the intrinsic if it's not valid to bitcast the return 3920 // value to the return type of the old function. 3921 if (NewFuncTy->getReturnType() != CI->getType() && 3922 !CastInst::castIsValid(Instruction::BitCast, CI, 3923 NewFuncTy->getReturnType())) 3924 continue; 3925 3926 bool InvalidCast = false; 3927 3928 for (unsigned I = 0, E = CI->getNumArgOperands(); I != E; ++I) { 3929 Value *Arg = CI->getArgOperand(I); 3930 3931 // Bitcast argument to the parameter type of the new function if it's 3932 // not a variadic argument. 3933 if (I < NewFuncTy->getNumParams()) { 3934 // Don't upgrade the intrinsic if it's not valid to bitcast the argument 3935 // to the parameter type of the new function. 3936 if (!CastInst::castIsValid(Instruction::BitCast, Arg, 3937 NewFuncTy->getParamType(I))) { 3938 InvalidCast = true; 3939 break; 3940 } 3941 Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I)); 3942 } 3943 Args.push_back(Arg); 3944 } 3945 3946 if (InvalidCast) 3947 continue; 3948 3949 // Create a call instruction that calls the new function. 3950 CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args); 3951 NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind()); 3952 NewCall->setName(CI->getName()); 3953 3954 // Bitcast the return value back to the type of the old call. 
3955 Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType()); 3956 3957 if (!CI->use_empty()) 3958 CI->replaceAllUsesWith(NewRetVal); 3959 CI->eraseFromParent(); 3960 } 3961 3962 if (Fn->use_empty()) 3963 Fn->eraseFromParent(); 3964 }; 3965 3966 // Unconditionally convert a call to "clang.arc.use" to a call to 3967 // "llvm.objc.clang.arc.use". 3968 UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use); 3969 3970 // Upgrade the retain release marker. If there is no need to upgrade 3971 // the marker, that means either the module is already new enough to contain 3972 // new intrinsics or it is not ARC. There is no need to upgrade runtime call. 3973 if (!UpgradeRetainReleaseMarker(M)) 3974 return; 3975 3976 std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = { 3977 {"objc_autorelease", llvm::Intrinsic::objc_autorelease}, 3978 {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop}, 3979 {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush}, 3980 {"objc_autoreleaseReturnValue", 3981 llvm::Intrinsic::objc_autoreleaseReturnValue}, 3982 {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak}, 3983 {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak}, 3984 {"objc_initWeak", llvm::Intrinsic::objc_initWeak}, 3985 {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak}, 3986 {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained}, 3987 {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak}, 3988 {"objc_release", llvm::Intrinsic::objc_release}, 3989 {"objc_retain", llvm::Intrinsic::objc_retain}, 3990 {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease}, 3991 {"objc_retainAutoreleaseReturnValue", 3992 llvm::Intrinsic::objc_retainAutoreleaseReturnValue}, 3993 {"objc_retainAutoreleasedReturnValue", 3994 llvm::Intrinsic::objc_retainAutoreleasedReturnValue}, 3995 {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock}, 3996 {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong}, 3997 {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak}, 3998 {"objc_unsafeClaimAutoreleasedReturnValue", 3999 llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue}, 4000 {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject}, 4001 {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject}, 4002 {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer}, 4003 {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease}, 4004 {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter}, 4005 {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit}, 4006 {"objc_arc_annotation_topdown_bbstart", 4007 llvm::Intrinsic::objc_arc_annotation_topdown_bbstart}, 4008 {"objc_arc_annotation_topdown_bbend", 4009 llvm::Intrinsic::objc_arc_annotation_topdown_bbend}, 4010 {"objc_arc_annotation_bottomup_bbstart", 4011 llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart}, 4012 {"objc_arc_annotation_bottomup_bbend", 4013 llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}}; 4014 4015 for (auto &I : RuntimeFuncs) 4016 UpgradeToIntrinsic(I.first, I.second); 4017 } 4018 4019 bool llvm::UpgradeModuleFlags(Module &M) { 4020 NamedMDNode *ModFlags = M.getModuleFlagsMetadata(); 4021 if (!ModFlags) 4022 return false; 4023 4024 bool HasObjCFlag = false, HasClassProperties = false, Changed = false; 4025 for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) { 4026 MDNode *Op = ModFlags->getOperand(I); 4027 if (Op->getNumOperands() != 3) 4028 continue; 4029 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1)); 4030 if (!ID) 4031 
continue; 4032 if (ID->getString() == "Objective-C Image Info Version") 4033 HasObjCFlag = true; 4034 if (ID->getString() == "Objective-C Class Properties") 4035 HasClassProperties = true; 4036 // Upgrade PIC/PIE Module Flags. The module flag behavior for these two 4037 // flags was Error and is now Max. 4038 if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") { 4039 if (auto *Behavior = 4040 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) { 4041 if (Behavior->getLimitedValue() == Module::Error) { 4042 Type *Int32Ty = Type::getInt32Ty(M.getContext()); 4043 Metadata *Ops[3] = { 4044 ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)), 4045 MDString::get(M.getContext(), ID->getString()), 4046 Op->getOperand(2)}; 4047 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops)); 4048 Changed = true; 4049 } 4050 } 4051 } 4052 // Upgrade Objective-C Image Info Section. Remove the whitespace in the 4053 // section name so that llvm-lto will not complain about mismatching 4054 // module flags that are functionally the same. 4055 if (ID->getString() == "Objective-C Image Info Section") { 4056 if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) { 4057 SmallVector<StringRef, 4> ValueComp; 4058 Value->getString().split(ValueComp, " "); 4059 if (ValueComp.size() != 1) { 4060 std::string NewValue; 4061 for (auto &S : ValueComp) 4062 NewValue += S.str(); 4063 Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1), 4064 MDString::get(M.getContext(), NewValue)}; 4065 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops)); 4066 Changed = true; 4067 } 4068 } 4069 } 4070 } 4071 4072 // "Objective-C Class Properties" was recently added for Objective-C. We 4073 // upgrade ObjC bitcodes to contain an "Objective-C Class Properties" module 4074 // flag of value 0, so we can correctly downgrade this flag when trying to 4075 // link an ObjC bitcode without this module flag with an ObjC bitcode with 4076 // this module flag.
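// (For example, this adds a module flag roughly of the form
// !{i32 4, !"Objective-C Class Properties", i32 0}, i.e. Override behavior
// with a value of 0.)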
4077 if (HasObjCFlag && !HasClassProperties) { 4078 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties", 4079 (uint32_t)0); 4080 Changed = true; 4081 } 4082 4083 return Changed; 4084 } 4085 4086 void llvm::UpgradeSectionAttributes(Module &M) { 4087 auto TrimSpaces = [](StringRef Section) -> std::string { 4088 SmallVector<StringRef, 5> Components; 4089 Section.split(Components, ','); 4090 4091 SmallString<32> Buffer; 4092 raw_svector_ostream OS(Buffer); 4093 4094 for (auto Component : Components) 4095 OS << ',' << Component.trim(); 4096 4097 return std::string(OS.str().substr(1)); 4098 }; 4099 4100 for (auto &GV : M.globals()) { 4101 if (!GV.hasSection()) 4102 continue; 4103 4104 StringRef Section = GV.getSection(); 4105 4106 if (!Section.startswith("__DATA, __objc_catlist")) 4107 continue; 4108 4109 // __DATA, __objc_catlist, regular, no_dead_strip 4110 // __DATA,__objc_catlist,regular,no_dead_strip 4111 GV.setSection(TrimSpaces(Section)); 4112 } 4113 } 4114 4115 static bool isOldLoopArgument(Metadata *MD) { 4116 auto *T = dyn_cast_or_null<MDTuple>(MD); 4117 if (!T) 4118 return false; 4119 if (T->getNumOperands() < 1) 4120 return false; 4121 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0)); 4122 if (!S) 4123 return false; 4124 return S->getString().startswith("llvm.vectorizer."); 4125 } 4126 4127 static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) { 4128 StringRef OldPrefix = "llvm.vectorizer."; 4129 assert(OldTag.startswith(OldPrefix) && "Expected old prefix"); 4130 4131 if (OldTag == "llvm.vectorizer.unroll") 4132 return MDString::get(C, "llvm.loop.interleave.count"); 4133 4134 return MDString::get( 4135 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size())) 4136 .str()); 4137 } 4138 4139 static Metadata *upgradeLoopArgument(Metadata *MD) { 4140 auto *T = dyn_cast_or_null<MDTuple>(MD); 4141 if (!T) 4142 return MD; 4143 if (T->getNumOperands() < 1) 4144 return MD; 4145 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0)); 4146 if (!OldTag) 4147 return MD; 4148 if (!OldTag->getString().startswith("llvm.vectorizer.")) 4149 return MD; 4150 4151 // This has an old tag. Upgrade it. 4152 SmallVector<Metadata *, 8> Ops; 4153 Ops.reserve(T->getNumOperands()); 4154 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString())); 4155 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I) 4156 Ops.push_back(T->getOperand(I)); 4157 4158 return MDTuple::get(T->getContext(), Ops); 4159 } 4160 4161 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) { 4162 auto *T = dyn_cast<MDTuple>(&N); 4163 if (!T) 4164 return &N; 4165 4166 if (none_of(T->operands(), isOldLoopArgument)) 4167 return &N; 4168 4169 SmallVector<Metadata *, 8> Ops; 4170 Ops.reserve(T->getNumOperands()); 4171 for (Metadata *MD : T->operands()) 4172 Ops.push_back(upgradeLoopArgument(MD)); 4173 4174 return MDTuple::get(T->getContext(), Ops); 4175 } 4176 4177 std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) { 4178 std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64"; 4179 4180 // If X86, and the datalayout matches the expected format, add pointer size 4181 // address spaces to the datalayout. 
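// For example (illustrative layout string): an x86-64 layout such as
//   "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
// becomes
//   "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128".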
4182 if (!Triple(TT).isX86() || DL.contains(AddrSpaces)) 4183 return std::string(DL); 4184 4185 SmallVector<StringRef, 4> Groups; 4186 Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)"); 4187 if (!R.match(DL, &Groups)) 4188 return std::string(DL); 4189 4190 SmallString<1024> Buf; 4191 std::string Res = (Groups[1] + AddrSpaces + Groups[3]).toStringRef(Buf).str(); 4192 return Res; 4193 } 4194 4195 void llvm::UpgradeFramePointerAttributes(AttrBuilder &B) { 4196 StringRef FramePointer; 4197 if (B.contains("no-frame-pointer-elim")) { 4198 // The value can be "true" or "false". 4199 for (const auto &I : B.td_attrs()) 4200 if (I.first == "no-frame-pointer-elim") 4201 FramePointer = I.second == "true" ? "all" : "none"; 4202 B.removeAttribute("no-frame-pointer-elim"); 4203 } 4204 if (B.contains("no-frame-pointer-elim-non-leaf")) { 4205 // The value is ignored. "no-frame-pointer-elim"="true" takes priority. 4206 if (FramePointer != "all") 4207 FramePointer = "non-leaf"; 4208 B.removeAttribute("no-frame-pointer-elim-non-leaf"); 4209 } 4210 4211 if (!FramePointer.empty()) 4212 B.addAttribute("frame-pointer", FramePointer); 4213 } 4214