//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>
using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradePTESTIntrinsic(Function *F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old; replace it with the new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsics matched below should be marked with the LLVM
  // version that started auto-upgrading them. At some point in the future we
  // would like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
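  // A name matched here is reported with NewFn left null (see
  // UpgradeX86IntrinsicFunction below): there is no one-to-one replacement
  // declaration, and the call itself is rewritten in UpgradeIntrinsicCall.
  // For example, a call to llvm.x86.sse2.padds.b ends up re-emitted as a
  // call to the generic llvm.sadd.sat.v16i8.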
  if (Name == "addcarryx.u32" || // Added in 8.0
      Name == "addcarryx.u64" || // Added in 8.0
      Name == "addcarry.u32" || // Added in 8.0
      Name == "addcarry.u64" || // Added in 8.0
      Name == "subborrow.u32" || // Added in 8.0
      Name == "subborrow.u64" || // Added in 8.0
      Name.startswith("sse2.padds.") || // Added in 8.0
      Name.startswith("sse2.psubs.") || // Added in 8.0
      Name.startswith("sse2.paddus.") || // Added in 8.0
      Name.startswith("sse2.psubus.") || // Added in 8.0
      Name.startswith("avx2.padds.") || // Added in 8.0
      Name.startswith("avx2.psubs.") || // Added in 8.0
      Name.startswith("avx2.paddus.") || // Added in 8.0
      Name.startswith("avx2.psubus.") || // Added in 8.0
      Name.startswith("avx512.padds.") || // Added in 8.0
      Name.startswith("avx512.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.padds.") || // Added in 8.0
      Name.startswith("avx512.mask.psubs.") || // Added in 8.0
      Name.startswith("avx512.mask.paddus.") || // Added in 8.0
      Name.startswith("avx512.mask.psubus.") || // Added in 8.0
      Name == "ssse3.pabs.b.128" || // Added in 6.0
      Name == "ssse3.pabs.w.128" || // Added in 6.0
      Name == "ssse3.pabs.d.128" || // Added in 6.0
      Name.startswith("fma4.vfmadd.s") || // Added in 7.0
      Name.startswith("fma.vfmadd.") || // Added in 7.0
      Name.startswith("fma.vfmsub.") || // Added in 7.0
      Name.startswith("fma.vfmaddsub.") || // Added in 7.0
      Name.startswith("fma.vfmsubadd.") || // Added in 7.0
      Name.startswith("fma.vfnmadd.") || // Added in 7.0
      Name.startswith("fma.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
      Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
      Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
      Name.startswith("avx512.kunpck") || // Added in 6.0
      Name.startswith("avx2.pabs.") || // Added in 6.0
      Name.startswith("avx512.mask.pabs.") || // Added in 6.0
      Name.startswith("avx512.broadcastm") || // Added in 6.0
      Name == "sse.sqrt.ss" || // Added in 7.0
      Name == "sse2.sqrt.sd" || // Added in 7.0
      Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
      Name.startswith("avx.sqrt.p") || // Added in 7.0
      Name.startswith("sse2.sqrt.p") || // Added in 7.0
      Name.startswith("sse.sqrt.p") || // Added in 7.0
      Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
      Name.startswith("sse2.pcmpeq.") || // Added in 3.1
      Name.startswith("sse2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx2.pcmpeq.") || // Added in 3.1
      Name.startswith("avx2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
      Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
      Name.startswith("avx.vperm2f128.") || // Added in 6.0
      Name == "avx2.vperm2i128" || // Added in 6.0
      Name == "sse.add.ss" || // Added in 4.0
      Name == "sse2.add.sd" || // Added in 4.0
137 Name == "sse.sub.ss" || // Added in 4.0 138 Name == "sse2.sub.sd" || // Added in 4.0 139 Name == "sse.mul.ss" || // Added in 4.0 140 Name == "sse2.mul.sd" || // Added in 4.0 141 Name == "sse.div.ss" || // Added in 4.0 142 Name == "sse2.div.sd" || // Added in 4.0 143 Name == "sse41.pmaxsb" || // Added in 3.9 144 Name == "sse2.pmaxs.w" || // Added in 3.9 145 Name == "sse41.pmaxsd" || // Added in 3.9 146 Name == "sse2.pmaxu.b" || // Added in 3.9 147 Name == "sse41.pmaxuw" || // Added in 3.9 148 Name == "sse41.pmaxud" || // Added in 3.9 149 Name == "sse41.pminsb" || // Added in 3.9 150 Name == "sse2.pmins.w" || // Added in 3.9 151 Name == "sse41.pminsd" || // Added in 3.9 152 Name == "sse2.pminu.b" || // Added in 3.9 153 Name == "sse41.pminuw" || // Added in 3.9 154 Name == "sse41.pminud" || // Added in 3.9 155 Name == "avx512.kand.w" || // Added in 7.0 156 Name == "avx512.kandn.w" || // Added in 7.0 157 Name == "avx512.knot.w" || // Added in 7.0 158 Name == "avx512.kor.w" || // Added in 7.0 159 Name == "avx512.kxor.w" || // Added in 7.0 160 Name == "avx512.kxnor.w" || // Added in 7.0 161 Name == "avx512.kortestc.w" || // Added in 7.0 162 Name == "avx512.kortestz.w" || // Added in 7.0 163 Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0 164 Name.startswith("avx2.pmax") || // Added in 3.9 165 Name.startswith("avx2.pmin") || // Added in 3.9 166 Name.startswith("avx512.mask.pmax") || // Added in 4.0 167 Name.startswith("avx512.mask.pmin") || // Added in 4.0 168 Name.startswith("avx2.vbroadcast") || // Added in 3.8 169 Name.startswith("avx2.pbroadcast") || // Added in 3.8 170 Name.startswith("avx.vpermil.") || // Added in 3.1 171 Name.startswith("sse2.pshuf") || // Added in 3.9 172 Name.startswith("avx512.pbroadcast") || // Added in 3.9 173 Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9 174 Name.startswith("avx512.mask.movddup") || // Added in 3.9 175 Name.startswith("avx512.mask.movshdup") || // Added in 3.9 176 Name.startswith("avx512.mask.movsldup") || // Added in 3.9 177 Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9 178 Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9 179 Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9 180 Name.startswith("avx512.mask.shuf.p") || // Added in 4.0 181 Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9 182 Name.startswith("avx512.mask.perm.df.") || // Added in 3.9 183 Name.startswith("avx512.mask.perm.di.") || // Added in 3.9 184 Name.startswith("avx512.mask.punpckl") || // Added in 3.9 185 Name.startswith("avx512.mask.punpckh") || // Added in 3.9 186 Name.startswith("avx512.mask.unpckl.") || // Added in 3.9 187 Name.startswith("avx512.mask.unpckh.") || // Added in 3.9 188 Name.startswith("avx512.mask.pand.") || // Added in 3.9 189 Name.startswith("avx512.mask.pandn.") || // Added in 3.9 190 Name.startswith("avx512.mask.por.") || // Added in 3.9 191 Name.startswith("avx512.mask.pxor.") || // Added in 3.9 192 Name.startswith("avx512.mask.and.") || // Added in 3.9 193 Name.startswith("avx512.mask.andn.") || // Added in 3.9 194 Name.startswith("avx512.mask.or.") || // Added in 3.9 195 Name.startswith("avx512.mask.xor.") || // Added in 3.9 196 Name.startswith("avx512.mask.padd.") || // Added in 4.0 197 Name.startswith("avx512.mask.psub.") || // Added in 4.0 198 Name.startswith("avx512.mask.pmull.") || // Added in 4.0 199 Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0 200 Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0 201 Name.startswith("avx512.mask.cvtudq2ps.") || // 
      Name.startswith("avx512.mask.cvtqq2pd.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtuqq2pd.") || // Added in 7.0 updated 9.0
      Name.startswith("avx512.mask.cvtdq2ps.") || // Added in 7.0 updated 9.0
      Name == "avx512.mask.cvtqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.256" || // Added in 9.0
      Name == "avx512.mask.cvtuqq2ps.512" || // Added in 9.0
      Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
      Name == "avx512.cvtusi2sd" || // Added in 7.0
      Name.startswith("avx512.mask.permvar.") || // Added in 7.0
      Name == "sse2.pmulu.dq" || // Added in 7.0
      Name == "sse41.pmuldq" || // Added in 7.0
      Name == "avx2.pmulu.dq" || // Added in 7.0
      Name == "avx2.pmul.dq" || // Added in 7.0
      Name == "avx512.pmulu.dq.512" || // Added in 7.0
      Name == "avx512.pmul.dq.512" || // Added in 7.0
      Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
      Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
      Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.p") || // Added in 7.0
      Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
      Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
      Name.startswith("avx512.mask.psll.d") || // Added in 4.0
      Name.startswith("avx512.mask.psll.q") || // Added in 4.0
      Name.startswith("avx512.mask.psll.w") || // Added in 4.0
      Name.startswith("avx512.mask.psra.d") || // Added in 4.0
      Name.startswith("avx512.mask.psra.q") || // Added in 4.0
      Name.startswith("avx512.mask.psra.w") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
      Name.startswith("avx512.mask.pslli") || // Added in 4.0
      Name.startswith("avx512.mask.psrai") || // Added in 4.0
      Name.startswith("avx512.mask.psrli") || // Added in 4.0
      Name.startswith("avx512.mask.psllv") || // Added in 4.0
      Name.startswith("avx512.mask.psrav") || // Added in 4.0
Name.startswith("avx512.mask.psrlv") || // Added in 4.0 261 Name.startswith("sse41.pmovsx") || // Added in 3.8 262 Name.startswith("sse41.pmovzx") || // Added in 3.9 263 Name.startswith("avx2.pmovsx") || // Added in 3.9 264 Name.startswith("avx2.pmovzx") || // Added in 3.9 265 Name.startswith("avx512.mask.pmovsx") || // Added in 4.0 266 Name.startswith("avx512.mask.pmovzx") || // Added in 4.0 267 Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0 268 Name.startswith("avx512.mask.pternlog.") || // Added in 7.0 269 Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0 270 Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0 271 Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0 272 Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0 273 Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0 274 Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0 275 Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0 276 Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0 277 Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0 278 Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0 279 Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0 280 Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0 281 Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0 282 Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0 283 Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0 284 Name.startswith("avx512.mask.vpshld.") || // Added in 7.0 285 Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0 286 Name.startswith("avx512.mask.vpshldv.") || // Added in 8.0 287 Name.startswith("avx512.mask.vpshrdv.") || // Added in 8.0 288 Name.startswith("avx512.maskz.vpshldv.") || // Added in 8.0 289 Name.startswith("avx512.maskz.vpshrdv.") || // Added in 8.0 290 Name.startswith("avx512.vpshld.") || // Added in 8.0 291 Name.startswith("avx512.vpshrd.") || // Added in 8.0 292 Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0 293 Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0 294 Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0 295 Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0 296 Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0 297 Name.startswith("avx512.mask.min.p") || // Added in 7.0. 
      Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
      Name.startswith("avx512.mask.vpshufbitqmb.") || // Added in 8.0
      Name.startswith("avx512.mask.pmultishift.qb.") || // Added in 8.0
      Name.startswith("avx512.mask.conflict.") || // Added in 9.0
      Name == "avx512.mask.pmov.qd.256" || // Added in 9.0
      Name == "avx512.mask.pmov.qd.512" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.256" || // Added in 9.0
      Name == "avx512.mask.pmov.wb.512" || // Added in 9.0
      Name == "sse.cvtsi2ss" || // Added in 7.0
      Name == "sse.cvtsi642ss" || // Added in 7.0
      Name == "sse2.cvtsi2sd" || // Added in 7.0
      Name == "sse2.cvtsi642sd" || // Added in 7.0
      Name == "sse2.cvtss2sd" || // Added in 7.0
      Name == "sse2.cvtdq2pd" || // Added in 3.9
      Name == "sse2.cvtdq2ps" || // Added in 7.0
      Name == "sse2.cvtps2pd" || // Added in 3.9
      Name == "avx.cvtdq2.pd.256" || // Added in 3.9
      Name == "avx.cvtdq2.ps.256" || // Added in 7.0
      Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
      Name.startswith("avx.vinsertf128.") || // Added in 3.7
      Name == "avx2.vinserti128" || // Added in 3.7
      Name.startswith("avx512.mask.insert") || // Added in 4.0
      Name.startswith("avx.vextractf128.") || // Added in 3.7
      Name == "avx2.vextracti128" || // Added in 3.7
      Name.startswith("avx512.mask.vextract") || // Added in 4.0
      Name.startswith("sse4a.movnt.") || // Added in 3.9
      Name.startswith("avx.movnt.") || // Added in 3.2
      Name.startswith("avx512.storent.") || // Added in 3.9
      Name == "sse41.movntdqa" || // Added in 5.0
      Name == "avx2.movntdqa" || // Added in 5.0
      Name == "avx512.movntdqa" || // Added in 5.0
      Name == "sse2.storel.dq" || // Added in 3.9
      Name.startswith("sse.storeu.") || // Added in 3.9
      Name.startswith("sse2.storeu.") || // Added in 3.9
      Name.startswith("avx.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.store.p") || // Added in 3.9
      Name.startswith("avx512.mask.store.b.") || // Added in 3.9
      Name.startswith("avx512.mask.store.w.") || // Added in 3.9
      Name.startswith("avx512.mask.store.d.") || // Added in 3.9
      Name.startswith("avx512.mask.store.q.") || // Added in 3.9
      Name == "avx512.mask.store.ss" || // Added in 7.0
      Name.startswith("avx512.mask.loadu.") || // Added in 3.9
      Name.startswith("avx512.mask.load.") || // Added in 3.9
      Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
      Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
      Name.startswith("avx512.mask.expand.b") || // Added in 9.0
      Name.startswith("avx512.mask.expand.w") || // Added in 9.0
      Name.startswith("avx512.mask.expand.d") || // Added in 9.0
      Name.startswith("avx512.mask.expand.q") || // Added in 9.0
      Name.startswith("avx512.mask.expand.p") || // Added in 9.0
      Name.startswith("avx512.mask.compress.b") || // Added in 9.0
      Name.startswith("avx512.mask.compress.w") || // Added in 9.0
      Name.startswith("avx512.mask.compress.d") || // Added in 9.0
      Name.startswith("avx512.mask.compress.q") || // Added in 9.0
      Name.startswith("avx512.mask.compress.p") || // Added in 9.0
      Name == "sse42.crc32.64.8" || // Added in 3.4
      Name.startswith("avx.vbroadcast.s") || // Added in 3.5
      Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
      Name.startswith("avx512.mask.palignr.") || // Added in 3.9
      Name.startswith("avx512.mask.valign.") || // Added in 4.0
Name.startswith("sse2.psll.dq") || // Added in 3.7 360 Name.startswith("sse2.psrl.dq") || // Added in 3.7 361 Name.startswith("avx2.psll.dq") || // Added in 3.7 362 Name.startswith("avx2.psrl.dq") || // Added in 3.7 363 Name.startswith("avx512.psll.dq") || // Added in 3.9 364 Name.startswith("avx512.psrl.dq") || // Added in 3.9 365 Name == "sse41.pblendw" || // Added in 3.7 366 Name.startswith("sse41.blendp") || // Added in 3.7 367 Name.startswith("avx.blend.p") || // Added in 3.7 368 Name == "avx2.pblendw" || // Added in 3.7 369 Name.startswith("avx2.pblendd.") || // Added in 3.7 370 Name.startswith("avx.vbroadcastf128") || // Added in 4.0 371 Name == "avx2.vbroadcasti128" || // Added in 3.7 372 Name.startswith("avx512.mask.broadcastf") || // Added in 6.0 373 Name.startswith("avx512.mask.broadcasti") || // Added in 6.0 374 Name == "xop.vpcmov" || // Added in 3.8 375 Name == "xop.vpcmov.256" || // Added in 5.0 376 Name.startswith("avx512.mask.move.s") || // Added in 4.0 377 Name.startswith("avx512.cvtmask2") || // Added in 5.0 378 Name.startswith("xop.vpcom") || // Added in 3.2, Updated in 9.0 379 Name.startswith("xop.vprot") || // Added in 8.0 380 Name.startswith("avx512.prol") || // Added in 8.0 381 Name.startswith("avx512.pror") || // Added in 8.0 382 Name.startswith("avx512.mask.prorv.") || // Added in 8.0 383 Name.startswith("avx512.mask.pror.") || // Added in 8.0 384 Name.startswith("avx512.mask.prolv.") || // Added in 8.0 385 Name.startswith("avx512.mask.prol.") || // Added in 8.0 386 Name.startswith("avx512.ptestm") || //Added in 6.0 387 Name.startswith("avx512.ptestnm") || //Added in 6.0 388 Name.startswith("avx512.mask.pavg")) // Added in 6.0 389 return true; 390 391 return false; 392 } 393 394 static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name, 395 Function *&NewFn) { 396 // Only handle intrinsics that start with "x86.". 397 if (!Name.startswith("x86.")) 398 return false; 399 // Remove "x86." prefix. 400 Name = Name.substr(4); 401 402 if (ShouldUpgradeX86Intrinsic(F, Name)) { 403 NewFn = nullptr; 404 return true; 405 } 406 407 if (Name == "rdtscp") { // Added in 8.0 408 // If this intrinsic has 0 operands, it's the new version. 409 if (F->getFunctionType()->getNumParams() == 0) 410 return false; 411 412 rename(F); 413 NewFn = Intrinsic::getDeclaration(F->getParent(), 414 Intrinsic::x86_rdtscp); 415 return true; 416 } 417 418 // SSE4.1 ptest functions may have an old signature. 419 if (Name.startswith("sse41.ptest")) { // Added in 3.2 420 if (Name.substr(11) == "c") 421 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn); 422 if (Name.substr(11) == "z") 423 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn); 424 if (Name.substr(11) == "nzc") 425 return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn); 426 } 427 // Several blend and other instructions with masks used the wrong number of 428 // bits. 
  if (Name == "sse41.insertps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
                                            NewFn);
  if (Name == "sse41.dppd") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
                                            NewFn);
  if (Name == "sse41.dpps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
                                            NewFn);
  if (Name == "sse41.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
                                            NewFn);
  if (Name == "avx.dp.ps.256") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
                                            NewFn);
  if (Name == "avx2.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
                                            NewFn);

  // frcz.ss/sd may need to have an argument dropped. Added in 3.2.
  if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_ss);
    return true;
  }
  if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_sd);
    return true;
  }
  // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
  if (Name.startswith("xop.vpermil2")) { // Added in 3.9
    auto Idx = F->getFunctionType()->getParamType(2);
    if (Idx->isFPOrFPVectorTy()) {
      rename(F);
      unsigned IdxSize = Idx->getPrimitiveSizeInBits();
      unsigned EltSize = Idx->getScalarSizeInBits();
      Intrinsic::ID Permil2ID;
      if (EltSize == 64 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd;
      else if (EltSize == 32 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2ps;
      else if (EltSize == 64 && IdxSize == 256)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
      else
        Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
      NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
      return true;
    }
  }

  if (Name == "seh.recoverfp") {
    NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_recoverfp);
    return true;
  }

  return false;
}

static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vclz")) {
      Type *args[2] = {
          F->arg_begin()->getType(),
          Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType *fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    static const Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType *fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm." + Name + ".p0i8", F->getParent());
      return true;
    }
    static const Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vstRegex.match(Name)) {
      static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
                                                Intrinsic::arm_neon_vst2,
                                                Intrinsic::arm_neon_vst3,
                                                Intrinsic::arm_neon_vst4};

      static const Intrinsic::ID StoreLaneInts[] = {
          Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
          Intrinsic::arm_neon_vst4lane
      };

      auto fArgs = F->getFunctionType()->params();
      Type *Tys[] = {fArgs[0], fArgs[1]};
      if (Name.find("lane") == StringRef::npos)
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreInts[fArgs.size() - 3], Tys);
      else
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreLaneInts[fArgs.size() - 5], Tys);
      return true;
    }
    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
      NewFn = Intrinsic::getDeclaration(F->getParent(),
                                        Intrinsic::thread_pointer);
      return true;
    }
    if (Name.startswith("arm.neon.vqadds.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::sadd_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vqaddu.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::uadd_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vqsubs.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ssub_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vqsubu.")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::usub_sat,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("aarch64.neon.addp")) {
      if (F->arg_size() != 2)
        break; // Invalid IR.
      auto fArgs = F->getFunctionType()->params();
      VectorType *ArgTy = dyn_cast<VectorType>(fArgs[0]);
      if (ArgTy && ArgTy->getElementType()->isFloatingPointTy()) {
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::aarch64_neon_faddp, fArgs);
        return true;
      }
    }
    break;
  }

  case 'c': {
    if (Name.startswith("ctlz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("cttz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
                                        F->arg_begin()->getType());
      return true;
    }
    break;
  }
  case 'd': {
    if (Name == "dbg.value" && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
      return true;
    }
    break;
  }
  case 'e': {
    SmallVector<StringRef, 2> Groups;
    static const Regex R("^experimental.vector.reduce.([a-z]+)\\.[fi][0-9]+");
    if (R.match(Name, &Groups)) {
      Intrinsic::ID ID = Intrinsic::not_intrinsic;
      if (Groups[1] == "fadd")
        ID = Intrinsic::experimental_vector_reduce_v2_fadd;
      if (Groups[1] == "fmul")
        ID = Intrinsic::experimental_vector_reduce_v2_fmul;

      if (ID != Intrinsic::not_intrinsic) {
        rename(F);
        auto Args = F->getFunctionType()->params();
        Type *Tys[] = {F->getFunctionType()->getReturnType(), Args[1]};
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, Tys);
        return true;
      }
    }
    break;
  }
  case 'i':
  case 'l': {
    bool IsLifetimeStart = Name.startswith("lifetime.start");
    if (IsLifetimeStart || Name.startswith("invariant.start")) {
      Intrinsic::ID ID = IsLifetimeStart ?
          Intrinsic::lifetime_start : Intrinsic::invariant_start;
      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[1]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }

    bool IsLifetimeEnd = Name.startswith("lifetime.end");
    if (IsLifetimeEnd || Name.startswith("invariant.end")) {
      Intrinsic::ID ID = IsLifetimeEnd ?
          Intrinsic::lifetime_end : Intrinsic::invariant_end;

      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
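      // (lifetime.end carries the object pointer as operand 1, while
      // invariant.end carries it as operand 2.)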
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }
    if (Name.startswith("invariant.group.barrier")) {
      // Rename invariant.group.barrier to launder.invariant.group.
      auto Args = F->getFunctionType()->params();
      Type *ObjectPtr[1] = {Args[0]};
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
          Intrinsic::launder_invariant_group, ObjectPtr);
      return true;
    }

    break;
  }
  case 'm': {
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    // Rename the gather/scatter intrinsics that lack address-space
    // overloading to the new overloads that include an address space.
    if (Name.startswith("masked.gather.")) {
      Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_gather, Tys);
        return true;
      }
    }
    if (Name.startswith("masked.scatter.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = {Args[0], Args[1]};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_scatter, Tys);
        return true;
      }
    }
    // Update the memory intrinsics (memcpy/memmove/memset) that have an
    // alignment parameter so the alignment is instead carried as an attribute
    // on the pointer args.
    if (Name.startswith("memcpy.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memmove.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len.
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest and len.
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
  case 'n': {
    if (Name.startswith("nvvm.")) {
      Name = Name.substr(5);

      // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
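      // (e.g. llvm.nvvm.brev32, which takes and returns i32, is redeclared
      // as llvm.bitreverse.i32; the overload is taken from the return type.)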
      Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
                              .Cases("brev32", "brev64", Intrinsic::bitreverse)
                              .Case("clz.i", Intrinsic::ctlz)
                              .Case("popc.i", Intrinsic::ctpop)
                              .Default(Intrinsic::not_intrinsic);
      if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                          {F->getReturnType()});
        return true;
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
      // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = StringSwitch<bool>(Name)
                        .Cases("abs.i", "abs.ll", true)
                        .Cases("clz.ll", "popc.ll", "h2f", true)
                        .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
                        .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
                        .StartsWith("atomic.load.add.f32.p", true)
                        .StartsWith("atomic.load.add.f64.p", true)
                        .Default(false);
      if (Expand) {
        NewFn = nullptr;
        return true;
      }
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->arg_size() == 2 || F->arg_size() == 3 ||
          F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
                                          Tys);
        return true;
      }
    }
    break;

  case 'p':
    if (Name == "prefetch") {
      // Handle address space overloading.
      Type *Tys[] = {F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::prefetch, Tys)) {
        rename(F);
        NewFn =
            Intrinsic::getDeclaration(F->getParent(), Intrinsic::prefetch, Tys);
        return true;
      }
    }
    break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x':
    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }
  // Remangle our intrinsic since we upgrade the mangling.
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != None) {
    NewFn = Result.getValue();
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
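  // (e.g. a just-upgraded llvm.memcpy declaration picks its parameter and
  // function attributes back up from the intrinsic tables.)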
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

GlobalVariable *llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  if (!(GV->hasName() && (GV->getName() == "llvm.global_ctors" ||
                          GV->getName() == "llvm.global_dtors")) ||
      !GV->hasInitializer())
    return nullptr;
  ArrayType *ATy = dyn_cast<ArrayType>(GV->getValueType());
  if (!ATy)
    return nullptr;
  StructType *STy = dyn_cast<StructType>(ATy->getElementType());
  if (!STy || STy->getNumElements() != 2)
    return nullptr;

  LLVMContext &C = GV->getContext();
  IRBuilder<> IRB(C);
  auto EltTy = StructType::get(STy->getElementType(0), STy->getElementType(1),
                               IRB.getInt8PtrTy());
  Constant *Init = GV->getInitializer();
  unsigned N = Init->getNumOperands();
  std::vector<Constant *> NewCtors(N);
  for (unsigned i = 0; i != N; ++i) {
    auto Ctor = cast<Constant>(Init->getOperand(i));
    NewCtors[i] = ConstantStruct::get(
        EltTy, Ctor->getAggregateElement(0u), Ctor->getAggregateElement(1),
        Constant::getNullValue(IRB.getInt8PtrTy()));
  }
  Constant *NewInit = ConstantArray::get(ArrayType::get(EltTy, N), NewCtors);

  return new GlobalVariable(NewInit->getType(), false, GV->getLinkage(),
                            NewInit, GV->getName());
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
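    // (e.g. for a 128-bit psrl.dq with Shift == 4, this picks elements
    // 4..15 from Op followed by 16..19, which read zeros from Res.)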
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(
      Builder.getInt1Ty(),
      cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}

static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
                                  Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  llvm::VectorType *MaskTy =
      llvm::VectorType::get(Builder.getInt1Ty(),
                            Mask->getType()->getIntegerBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);
  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the
// immediate, so we need to handle both cases. VALIGN also doesn't have
// 128-bit lanes.
static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
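  // (e.g. a 128-bit palignr with an immediate of 20 becomes a palignr of
  // zero and Op0 with an immediate of 4, so zeros shift in from the top.)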
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes, so we need to handle that.
  for (unsigned l = 0; l < NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}

static Value *UpgradeX86VPERMT2Intrinsics(IRBuilder<> &Builder, CallInst &CI,
                                          bool ZeroMask, bool IndexForm) {
  Type *Ty = CI.getType();
  unsigned VecWidth = Ty->getPrimitiveSizeInBits();
  unsigned EltWidth = Ty->getScalarSizeInBits();
  bool IsFloat = Ty->isFPOrFPVectorTy();
  Intrinsic::ID IID;
  if (VecWidth == 128 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_128;
  else if (VecWidth == 128 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_128;
  else if (VecWidth == 128 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_128;
  else if (VecWidth == 128 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_128;
  else if (VecWidth == 256 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_256;
  else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_256;
  else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_256;
  else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_256;
  else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_ps_512;
  else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_d_512;
  else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_pd_512;
  else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
    IID = Intrinsic::x86_avx512_vpermi2var_q_512;
  else if (VecWidth == 128 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_128;
  else if (VecWidth == 256 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_256;
  else if (VecWidth == 512 && EltWidth == 16)
    IID = Intrinsic::x86_avx512_vpermi2var_hi_512;
  else if (VecWidth == 128 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_128;
  else if (VecWidth == 256 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_256;
  else if (VecWidth == 512 && EltWidth == 8)
    IID = Intrinsic::x86_avx512_vpermi2var_qi_512;
  else
    llvm_unreachable("Unexpected intrinsic");

  Value *Args[] = { CI.getArgOperand(0), CI.getArgOperand(1),
                    CI.getArgOperand(2) };

  // If this isn't index form we need to swap operand 0 and 1.
  if (!IndexForm)
    std::swap(Args[0], Args[1]);

  Value *V = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
                                Args);
  Value *PassThru = ZeroMask ? ConstantAggregateZero::get(Ty)
                             : Builder.CreateBitCast(CI.getArgOperand(1),
                                                     Ty);
  return EmitX86Select(Builder, CI.getArgOperand(3), V, PassThru);
}

static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
                                            bool IsSigned, bool IsAddition) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getOperand(0);
  Value *Op1 = CI.getOperand(1);

  Intrinsic::ID IID =
      IsSigned ? (IsAddition ? Intrinsic::sadd_sat : Intrinsic::ssub_sat)
               : (IsAddition ? Intrinsic::uadd_sat : Intrinsic::usub_sat);
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86Rotate(IRBuilder<> &Builder, CallInst &CI,
                               bool IsRotateRight) {
  Type *Ty = CI.getType();
  Value *Src = CI.getArgOperand(0);
  Value *Amt = CI.getArgOperand(1);

  // Amount may be a scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2,
  // so we only care about the lowest log2 bits anyway.
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Src, Src, Amt});

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *upgradeX86vpcom(IRBuilder<> &Builder, CallInst &CI, unsigned Imm,
                              bool IsSigned) {
  Type *Ty = CI.getType();
  Value *LHS = CI.getArgOperand(0);
  Value *RHS = CI.getArgOperand(1);

  CmpInst::Predicate Pred;
  switch (Imm) {
  case 0x0:
    Pred = IsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    break;
  case 0x1:
    Pred = IsSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
    break;
  case 0x2:
    Pred = IsSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
    break;
  case 0x3:
    Pred = IsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
    break;
  case 0x4:
    Pred = ICmpInst::ICMP_EQ;
    break;
  case 0x5:
    Pred = ICmpInst::ICMP_NE;
    break;
  case 0x6:
    return Constant::getNullValue(Ty); // FALSE
  case 0x7:
    return Constant::getAllOnesValue(Ty); // TRUE
  default:
    llvm_unreachable("Unknown XOP vpcom/vpcomu predicate");
  }

  Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
  Value *Ext = Builder.CreateSExt(Cmp, Ty);
  return Ext;
}

static Value *upgradeX86ConcatShift(IRBuilder<> &Builder, CallInst &CI,
                                    bool IsShiftRight, bool ZeroMask) {
  Type *Ty = CI.getType();
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Amt = CI.getArgOperand(2);

  if (IsShiftRight)
    std::swap(Op0, Op1);

  // Amount may be a scalar immediate, in which case create a splat vector.
  // Funnel shift amounts are treated as modulo and types are all power-of-2,
  // so we only care about the lowest log2 bits anyway.
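  // (e.g. a <2 x i64> vpshld with an immediate of 8 becomes, roughly,
  // @llvm.fshl.v2i64(%Op0, %Op1, <i64 8, i64 8>).)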
  if (Amt->getType() != Ty) {
    unsigned NumElts = Ty->getVectorNumElements();
    Amt = Builder.CreateIntCast(Amt, Ty->getScalarType(), false);
    Amt = Builder.CreateVectorSplat(NumElts, Amt);
  }

  Intrinsic::ID IID = IsShiftRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID, Ty);
  Value *Res = Builder.CreateCall(Intrin, {Op0, Op1, Amt});

  unsigned NumArgs = CI.getNumArgOperands();
  if (NumArgs >= 4) { // For masked intrinsics.
    Value *VecSrc = NumArgs == 5 ? CI.getArgOperand(3) :
                    ZeroMask ? ConstantAggregateZero::get(CI.getType()) :
                               CI.getArgOperand(0);
    Value *Mask = CI.getOperand(NumArgs - 1);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  unsigned Align =
      Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  Type *ValTy = Passthru->getType();
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(ValTy));
  unsigned Align =
      Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(ValTy, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
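  // (An i8 mask covering fewer than 8 elements is first bitcast to <8 x i1>
  // and then shuffled down to the low NumElts elements; see getX86MaskVec.)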
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
}

static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  llvm::Type *Ty = Op0->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
  Value *Neg = Builder.CreateNeg(Op0);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);

  if (CI.getNumArgOperands() == 3)
    Res = EmitX86Select(Builder, CI.getArgOperand(2), Res, CI.getArgOperand(1));

  return Res;
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
  Type *Ty = CI.getType();

  // Arguments have a vXi32 type so cast to vXi64.
  Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
  Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = Builder.CreateShl(LHS, ShiftAmt);
    LHS = Builder.CreateAShr(LHS, ShiftAmt);
    RHS = Builder.CreateShl(RHS, ShiftAmt);
    RHS = Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = Builder.CreateAnd(LHS, Mask);
    RHS = Builder.CreateAnd(RHS, Mask);
  }

  Value *Res = Builder.CreateMul(LHS, RHS);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

// Apply a mask to a vector of i1s and make sure the result is at least 8 bits
// wide.
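// (e.g. a <4 x i1> compare result is padded to <8 x i1> with zeros taken
// from a null vector before being bitcast to i8.)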
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask) {
  unsigned NumElts = Vec->getType()->getVectorNumElements();
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Vec = Builder.CreateShuffleVector(Vec,
                                      Constant::getNullValue(Vec->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
}

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   unsigned CC, bool Signed) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(
        llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ; break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE; break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
  }

  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);

  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
                                    Intrinsic::ID IID) {
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
  Value *Rep = Builder.CreateCall(Intrin,
                                  { CI.getArgOperand(0), CI.getArgOperand(1) });
  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}

static Value *upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value *A = CI.getArgOperand(0);
  Value *B = CI.getArgOperand(1);
  Value *Src = CI.getArgOperand(2);
  Value *Mask = CI.getArgOperand(3);

  Value *AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
  Value *Cmp = Builder.CreateIsNotNull(AndNode);
  Value *Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
  Value *Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
  Value *Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}

static Value *UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op = CI.getArgOperand(0);
  Type *ReturnOp = CI.getType();
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
}

// Replace intrinsic with unmasked version and a select.
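// (e.g. avx512.mask.max.ps.128 becomes a call to the unmasked
// x86.sse.max.ps whose result is then selected against the passthrough
// operand via EmitX86Select.)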
1413 static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder, 1414 CallInst &CI, Value *&Rep) { 1415 Name = Name.substr(12); // Remove avx512.mask. 1416 1417 unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits(); 1418 unsigned EltWidth = CI.getType()->getScalarSizeInBits(); 1419 Intrinsic::ID IID; 1420 if (Name.startswith("max.p")) { 1421 if (VecWidth == 128 && EltWidth == 32) 1422 IID = Intrinsic::x86_sse_max_ps; 1423 else if (VecWidth == 128 && EltWidth == 64) 1424 IID = Intrinsic::x86_sse2_max_pd; 1425 else if (VecWidth == 256 && EltWidth == 32) 1426 IID = Intrinsic::x86_avx_max_ps_256; 1427 else if (VecWidth == 256 && EltWidth == 64) 1428 IID = Intrinsic::x86_avx_max_pd_256; 1429 else 1430 llvm_unreachable("Unexpected intrinsic"); 1431 } else if (Name.startswith("min.p")) { 1432 if (VecWidth == 128 && EltWidth == 32) 1433 IID = Intrinsic::x86_sse_min_ps; 1434 else if (VecWidth == 128 && EltWidth == 64) 1435 IID = Intrinsic::x86_sse2_min_pd; 1436 else if (VecWidth == 256 && EltWidth == 32) 1437 IID = Intrinsic::x86_avx_min_ps_256; 1438 else if (VecWidth == 256 && EltWidth == 64) 1439 IID = Intrinsic::x86_avx_min_pd_256; 1440 else 1441 llvm_unreachable("Unexpected intrinsic"); 1442 } else if (Name.startswith("pshuf.b.")) { 1443 if (VecWidth == 128) 1444 IID = Intrinsic::x86_ssse3_pshuf_b_128; 1445 else if (VecWidth == 256) 1446 IID = Intrinsic::x86_avx2_pshuf_b; 1447 else if (VecWidth == 512) 1448 IID = Intrinsic::x86_avx512_pshuf_b_512; 1449 else 1450 llvm_unreachable("Unexpected intrinsic"); 1451 } else if (Name.startswith("pmul.hr.sw.")) { 1452 if (VecWidth == 128) 1453 IID = Intrinsic::x86_ssse3_pmul_hr_sw_128; 1454 else if (VecWidth == 256) 1455 IID = Intrinsic::x86_avx2_pmul_hr_sw; 1456 else if (VecWidth == 512) 1457 IID = Intrinsic::x86_avx512_pmul_hr_sw_512; 1458 else 1459 llvm_unreachable("Unexpected intrinsic"); 1460 } else if (Name.startswith("pmulh.w.")) { 1461 if (VecWidth == 128) 1462 IID = Intrinsic::x86_sse2_pmulh_w; 1463 else if (VecWidth == 256) 1464 IID = Intrinsic::x86_avx2_pmulh_w; 1465 else if (VecWidth == 512) 1466 IID = Intrinsic::x86_avx512_pmulh_w_512; 1467 else 1468 llvm_unreachable("Unexpected intrinsic"); 1469 } else if (Name.startswith("pmulhu.w.")) { 1470 if (VecWidth == 128) 1471 IID = Intrinsic::x86_sse2_pmulhu_w; 1472 else if (VecWidth == 256) 1473 IID = Intrinsic::x86_avx2_pmulhu_w; 1474 else if (VecWidth == 512) 1475 IID = Intrinsic::x86_avx512_pmulhu_w_512; 1476 else 1477 llvm_unreachable("Unexpected intrinsic"); 1478 } else if (Name.startswith("pmaddw.d.")) { 1479 if (VecWidth == 128) 1480 IID = Intrinsic::x86_sse2_pmadd_wd; 1481 else if (VecWidth == 256) 1482 IID = Intrinsic::x86_avx2_pmadd_wd; 1483 else if (VecWidth == 512) 1484 IID = Intrinsic::x86_avx512_pmaddw_d_512; 1485 else 1486 llvm_unreachable("Unexpected intrinsic"); 1487 } else if (Name.startswith("pmaddubs.w.")) { 1488 if (VecWidth == 128) 1489 IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128; 1490 else if (VecWidth == 256) 1491 IID = Intrinsic::x86_avx2_pmadd_ub_sw; 1492 else if (VecWidth == 512) 1493 IID = Intrinsic::x86_avx512_pmaddubs_w_512; 1494 else 1495 llvm_unreachable("Unexpected intrinsic"); 1496 } else if (Name.startswith("packsswb.")) { 1497 if (VecWidth == 128) 1498 IID = Intrinsic::x86_sse2_packsswb_128; 1499 else if (VecWidth == 256) 1500 IID = Intrinsic::x86_avx2_packsswb; 1501 else if (VecWidth == 512) 1502 IID = Intrinsic::x86_avx512_packsswb_512; 1503 else 1504 llvm_unreachable("Unexpected intrinsic"); 1505 } else if (Name.startswith("packssdw.")) { 
1506 if (VecWidth == 128) 1507 IID = Intrinsic::x86_sse2_packssdw_128; 1508 else if (VecWidth == 256) 1509 IID = Intrinsic::x86_avx2_packssdw; 1510 else if (VecWidth == 512) 1511 IID = Intrinsic::x86_avx512_packssdw_512; 1512 else 1513 llvm_unreachable("Unexpected intrinsic"); 1514 } else if (Name.startswith("packuswb.")) { 1515 if (VecWidth == 128) 1516 IID = Intrinsic::x86_sse2_packuswb_128; 1517 else if (VecWidth == 256) 1518 IID = Intrinsic::x86_avx2_packuswb; 1519 else if (VecWidth == 512) 1520 IID = Intrinsic::x86_avx512_packuswb_512; 1521 else 1522 llvm_unreachable("Unexpected intrinsic"); 1523 } else if (Name.startswith("packusdw.")) { 1524 if (VecWidth == 128) 1525 IID = Intrinsic::x86_sse41_packusdw; 1526 else if (VecWidth == 256) 1527 IID = Intrinsic::x86_avx2_packusdw; 1528 else if (VecWidth == 512) 1529 IID = Intrinsic::x86_avx512_packusdw_512; 1530 else 1531 llvm_unreachable("Unexpected intrinsic"); 1532 } else if (Name.startswith("vpermilvar.")) { 1533 if (VecWidth == 128 && EltWidth == 32) 1534 IID = Intrinsic::x86_avx_vpermilvar_ps; 1535 else if (VecWidth == 128 && EltWidth == 64) 1536 IID = Intrinsic::x86_avx_vpermilvar_pd; 1537 else if (VecWidth == 256 && EltWidth == 32) 1538 IID = Intrinsic::x86_avx_vpermilvar_ps_256; 1539 else if (VecWidth == 256 && EltWidth == 64) 1540 IID = Intrinsic::x86_avx_vpermilvar_pd_256; 1541 else if (VecWidth == 512 && EltWidth == 32) 1542 IID = Intrinsic::x86_avx512_vpermilvar_ps_512; 1543 else if (VecWidth == 512 && EltWidth == 64) 1544 IID = Intrinsic::x86_avx512_vpermilvar_pd_512; 1545 else 1546 llvm_unreachable("Unexpected intrinsic"); 1547 } else if (Name == "cvtpd2dq.256") { 1548 IID = Intrinsic::x86_avx_cvt_pd2dq_256; 1549 } else if (Name == "cvtpd2ps.256") { 1550 IID = Intrinsic::x86_avx_cvt_pd2_ps_256; 1551 } else if (Name == "cvttpd2dq.256") { 1552 IID = Intrinsic::x86_avx_cvtt_pd2dq_256; 1553 } else if (Name == "cvttps2dq.128") { 1554 IID = Intrinsic::x86_sse2_cvttps2dq; 1555 } else if (Name == "cvttps2dq.256") { 1556 IID = Intrinsic::x86_avx_cvtt_ps2dq_256; 1557 } else if (Name.startswith("permvar.")) { 1558 bool IsFloat = CI.getType()->isFPOrFPVectorTy(); 1559 if (VecWidth == 256 && EltWidth == 32 && IsFloat) 1560 IID = Intrinsic::x86_avx2_permps; 1561 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat) 1562 IID = Intrinsic::x86_avx2_permd; 1563 else if (VecWidth == 256 && EltWidth == 64 && IsFloat) 1564 IID = Intrinsic::x86_avx512_permvar_df_256; 1565 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat) 1566 IID = Intrinsic::x86_avx512_permvar_di_256; 1567 else if (VecWidth == 512 && EltWidth == 32 && IsFloat) 1568 IID = Intrinsic::x86_avx512_permvar_sf_512; 1569 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat) 1570 IID = Intrinsic::x86_avx512_permvar_si_512; 1571 else if (VecWidth == 512 && EltWidth == 64 && IsFloat) 1572 IID = Intrinsic::x86_avx512_permvar_df_512; 1573 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat) 1574 IID = Intrinsic::x86_avx512_permvar_di_512; 1575 else if (VecWidth == 128 && EltWidth == 16) 1576 IID = Intrinsic::x86_avx512_permvar_hi_128; 1577 else if (VecWidth == 256 && EltWidth == 16) 1578 IID = Intrinsic::x86_avx512_permvar_hi_256; 1579 else if (VecWidth == 512 && EltWidth == 16) 1580 IID = Intrinsic::x86_avx512_permvar_hi_512; 1581 else if (VecWidth == 128 && EltWidth == 8) 1582 IID = Intrinsic::x86_avx512_permvar_qi_128; 1583 else if (VecWidth == 256 && EltWidth == 8) 1584 IID = Intrinsic::x86_avx512_permvar_qi_256; 1585 else if (VecWidth == 512 && EltWidth == 8) 1586 IID = 
Intrinsic::x86_avx512_permvar_qi_512; 1587 else 1588 llvm_unreachable("Unexpected intrinsic"); 1589 } else if (Name.startswith("dbpsadbw.")) { 1590 if (VecWidth == 128) 1591 IID = Intrinsic::x86_avx512_dbpsadbw_128; 1592 else if (VecWidth == 256) 1593 IID = Intrinsic::x86_avx512_dbpsadbw_256; 1594 else if (VecWidth == 512) 1595 IID = Intrinsic::x86_avx512_dbpsadbw_512; 1596 else 1597 llvm_unreachable("Unexpected intrinsic"); 1598 } else if (Name.startswith("pmultishift.qb.")) { 1599 if (VecWidth == 128) 1600 IID = Intrinsic::x86_avx512_pmultishift_qb_128; 1601 else if (VecWidth == 256) 1602 IID = Intrinsic::x86_avx512_pmultishift_qb_256; 1603 else if (VecWidth == 512) 1604 IID = Intrinsic::x86_avx512_pmultishift_qb_512; 1605 else 1606 llvm_unreachable("Unexpected intrinsic"); 1607 } else if (Name.startswith("conflict.")) { 1608 if (Name[9] == 'd' && VecWidth == 128) 1609 IID = Intrinsic::x86_avx512_conflict_d_128; 1610 else if (Name[9] == 'd' && VecWidth == 256) 1611 IID = Intrinsic::x86_avx512_conflict_d_256; 1612 else if (Name[9] == 'd' && VecWidth == 512) 1613 IID = Intrinsic::x86_avx512_conflict_d_512; 1614 else if (Name[9] == 'q' && VecWidth == 128) 1615 IID = Intrinsic::x86_avx512_conflict_q_128; 1616 else if (Name[9] == 'q' && VecWidth == 256) 1617 IID = Intrinsic::x86_avx512_conflict_q_256; 1618 else if (Name[9] == 'q' && VecWidth == 512) 1619 IID = Intrinsic::x86_avx512_conflict_q_512; 1620 else 1621 llvm_unreachable("Unexpected intrinsic"); 1622 } else if (Name.startswith("pavg.")) { 1623 if (Name[5] == 'b' && VecWidth == 128) 1624 IID = Intrinsic::x86_sse2_pavg_b; 1625 else if (Name[5] == 'b' && VecWidth == 256) 1626 IID = Intrinsic::x86_avx2_pavg_b; 1627 else if (Name[5] == 'b' && VecWidth == 512) 1628 IID = Intrinsic::x86_avx512_pavg_b_512; 1629 else if (Name[5] == 'w' && VecWidth == 128) 1630 IID = Intrinsic::x86_sse2_pavg_w; 1631 else if (Name[5] == 'w' && VecWidth == 256) 1632 IID = Intrinsic::x86_avx2_pavg_w; 1633 else if (Name[5] == 'w' && VecWidth == 512) 1634 IID = Intrinsic::x86_avx512_pavg_w_512; 1635 else 1636 llvm_unreachable("Unexpected intrinsic"); 1637 } else 1638 return false; 1639 1640 SmallVector<Value *, 4> Args(CI.arg_operands().begin(), 1641 CI.arg_operands().end()); 1642 Args.pop_back(); 1643 Args.pop_back(); 1644 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID), 1645 Args); 1646 unsigned NumArgs = CI.getNumArgOperands(); 1647 Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep, 1648 CI.getArgOperand(NumArgs - 2)); 1649 return true; 1650 } 1651 1652 /// Upgrade comment in call to inline asm that represents an objc retain release 1653 /// marker. 1654 void llvm::UpgradeInlineAsmString(std::string *AsmStr) { 1655 size_t Pos; 1656 if (AsmStr->find("mov\tfp") == 0 && 1657 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos && 1658 (Pos = AsmStr->find("# marker")) != std::string::npos) { 1659 AsmStr->replace(Pos, 1, ";"); 1660 } 1661 return; 1662 } 1663 1664 /// Upgrade a call to an old intrinsic. All argument and return casting must be 1665 /// provided to seamlessly integrate with existing context. 1666 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { 1667 Function *F = CI->getCalledFunction(); 1668 LLVMContext &C = CI->getContext(); 1669 IRBuilder<> Builder(C); 1670 Builder.SetInsertPoint(CI->getParent(), CI->getIterator()); 1671 1672 assert(F && "Intrinsic call is not direct?"); 1673 1674 if (!NewFn) { 1675 // Get the Function's name. 
1676 StringRef Name = F->getName(); 1677 1678 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'"); 1679 Name = Name.substr(5); 1680 1681 bool IsX86 = Name.startswith("x86."); 1682 if (IsX86) 1683 Name = Name.substr(4); 1684 bool IsNVVM = Name.startswith("nvvm."); 1685 if (IsNVVM) 1686 Name = Name.substr(5); 1687 1688 if (IsX86 && Name.startswith("sse4a.movnt.")) { 1689 Module *M = F->getParent(); 1690 SmallVector<Metadata *, 1> Elts; 1691 Elts.push_back( 1692 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1693 MDNode *Node = MDNode::get(C, Elts); 1694 1695 Value *Arg0 = CI->getArgOperand(0); 1696 Value *Arg1 = CI->getArgOperand(1); 1697 1698 // Nontemporal (unaligned) store of the 0'th element of the float/double 1699 // vector. 1700 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType(); 1701 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy); 1702 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast"); 1703 Value *Extract = 1704 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement"); 1705 1706 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1); 1707 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1708 1709 // Remove intrinsic. 1710 CI->eraseFromParent(); 1711 return; 1712 } 1713 1714 if (IsX86 && (Name.startswith("avx.movnt.") || 1715 Name.startswith("avx512.storent."))) { 1716 Module *M = F->getParent(); 1717 SmallVector<Metadata *, 1> Elts; 1718 Elts.push_back( 1719 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1720 MDNode *Node = MDNode::get(C, Elts); 1721 1722 Value *Arg0 = CI->getArgOperand(0); 1723 Value *Arg1 = CI->getArgOperand(1); 1724 1725 // Convert the type of the pointer to a pointer to the stored type. 1726 Value *BC = Builder.CreateBitCast(Arg0, 1727 PointerType::getUnqual(Arg1->getType()), 1728 "cast"); 1729 VectorType *VTy = cast<VectorType>(Arg1->getType()); 1730 StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, 1731 VTy->getBitWidth() / 8); 1732 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1733 1734 // Remove intrinsic. 1735 CI->eraseFromParent(); 1736 return; 1737 } 1738 1739 if (IsX86 && Name == "sse2.storel.dq") { 1740 Value *Arg0 = CI->getArgOperand(0); 1741 Value *Arg1 = CI->getArgOperand(1); 1742 1743 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 1744 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 1745 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0); 1746 Value *BC = Builder.CreateBitCast(Arg0, 1747 PointerType::getUnqual(Elt->getType()), 1748 "cast"); 1749 Builder.CreateAlignedStore(Elt, BC, 1); 1750 1751 // Remove intrinsic. 1752 CI->eraseFromParent(); 1753 return; 1754 } 1755 1756 if (IsX86 && (Name.startswith("sse.storeu.") || 1757 Name.startswith("sse2.storeu.") || 1758 Name.startswith("avx.storeu."))) { 1759 Value *Arg0 = CI->getArgOperand(0); 1760 Value *Arg1 = CI->getArgOperand(1); 1761 1762 Arg0 = Builder.CreateBitCast(Arg0, 1763 PointerType::getUnqual(Arg1->getType()), 1764 "cast"); 1765 Builder.CreateAlignedStore(Arg1, Arg0, 1); 1766 1767 // Remove intrinsic. 1768 CI->eraseFromParent(); 1769 return; 1770 } 1771 1772 if (IsX86 && Name == "avx512.mask.store.ss") { 1773 Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1)); 1774 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1), 1775 Mask, false); 1776 1777 // Remove intrinsic. 
1778 CI->eraseFromParent();
1779 return;
1780 }
1781
1782 if (IsX86 && (Name.startswith("avx512.mask.store"))) {
1783 // "avx512.mask.storeu." or "avx512.mask.store."
1784 bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
1785 UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
1786 CI->getArgOperand(2), Aligned);
1787
1788 // Remove intrinsic.
1789 CI->eraseFromParent();
1790 return;
1791 }
1792
1793 Value *Rep;
1794 // Upgrade packed integer vector compare intrinsics to compare instructions.
1795 if (IsX86 && (Name.startswith("sse2.pcmp") ||
1796 Name.startswith("avx2.pcmp"))) {
1797 // "sse2.pcmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
1798 bool CmpEq = Name[9] == 'e';
1799 Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
1800 CI->getArgOperand(0), CI->getArgOperand(1));
1801 Rep = Builder.CreateSExt(Rep, CI->getType(), "");
1802 } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
1803 Type *ExtTy = Type::getInt32Ty(C);
1804 if (CI->getOperand(0)->getType()->isIntegerTy(8))
1805 ExtTy = Type::getInt64Ty(C);
1806 unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
1807 ExtTy->getPrimitiveSizeInBits();
1808 Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
1809 Rep = Builder.CreateVectorSplat(NumElts, Rep);
1810 } else if (IsX86 && (Name == "sse.sqrt.ss" ||
1811 Name == "sse2.sqrt.sd")) {
1812 Value *Vec = CI->getArgOperand(0);
1813 Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
1814 Function *Intr = Intrinsic::getDeclaration(F->getParent(),
1815 Intrinsic::sqrt, Elt0->getType());
1816 Elt0 = Builder.CreateCall(Intr, Elt0);
1817 Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
1818 } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
1819 Name.startswith("sse2.sqrt.p") ||
1820 Name.startswith("sse.sqrt.p"))) {
1821 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1822 Intrinsic::sqrt,
1823 CI->getType()),
1824 {CI->getArgOperand(0)});
1825 } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
1826 if (CI->getNumArgOperands() == 4 &&
1827 (!isa<ConstantInt>(CI->getArgOperand(3)) ||
1828 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
1829 Intrinsic::ID IID = Name[18] == 's' ? Intrinsic::x86_avx512_sqrt_ps_512
1830 : Intrinsic::x86_avx512_sqrt_pd_512;
1831
1832 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) };
1833 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(),
1834 IID), Args);
1835 } else {
1836 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
1837 Intrinsic::sqrt,
1838 CI->getType()),
1839 {CI->getArgOperand(0)});
1840 }
1841 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1842 CI->getArgOperand(1));
1843 } else if (IsX86 && (Name.startswith("avx512.ptestm") ||
1844 Name.startswith("avx512.ptestnm"))) {
1845 Value *Op0 = CI->getArgOperand(0);
1846 Value *Op1 = CI->getArgOperand(1);
1847 Value *Mask = CI->getArgOperand(2);
1848 Rep = Builder.CreateAnd(Op0, Op1);
1849 llvm::Type *Ty = Op0->getType();
1850 Value *Zero = llvm::Constant::getNullValue(Ty);
1851 ICmpInst::Predicate Pred =
1852 Name.startswith("avx512.ptestm") ?
ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
1853 Rep = Builder.CreateICmp(Pred, Rep, Zero);
1854 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask);
1855 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))) {
1856 unsigned NumElts =
1857 CI->getArgOperand(1)->getType()->getVectorNumElements();
1858 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0));
1859 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
1860 CI->getArgOperand(1));
1861 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) {
1862 unsigned NumElts = CI->getType()->getScalarSizeInBits();
1863 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts);
1864 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts);
1865 uint32_t Indices[64];
1866 for (unsigned i = 0; i != NumElts; ++i)
1867 Indices[i] = i;
1868
1869 // First extract half of each vector. This gives better codegen than
1870 // doing it in a single shuffle.
1871 LHS = Builder.CreateShuffleVector(LHS, LHS,
1872 makeArrayRef(Indices, NumElts / 2));
1873 RHS = Builder.CreateShuffleVector(RHS, RHS,
1874 makeArrayRef(Indices, NumElts / 2));
1875 // Concat the vectors.
1876 // NOTE: Operands have to be swapped to match intrinsic definition.
1877 Rep = Builder.CreateShuffleVector(RHS, LHS,
1878 makeArrayRef(Indices, NumElts));
1879 Rep = Builder.CreateBitCast(Rep, CI->getType());
1880 } else if (IsX86 && Name == "avx512.kand.w") {
1881 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1882 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1883 Rep = Builder.CreateAnd(LHS, RHS);
1884 Rep = Builder.CreateBitCast(Rep, CI->getType());
1885 } else if (IsX86 && Name == "avx512.kandn.w") {
1886 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1887 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1888 LHS = Builder.CreateNot(LHS);
1889 Rep = Builder.CreateAnd(LHS, RHS);
1890 Rep = Builder.CreateBitCast(Rep, CI->getType());
1891 } else if (IsX86 && Name == "avx512.kor.w") {
1892 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1893 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1894 Rep = Builder.CreateOr(LHS, RHS);
1895 Rep = Builder.CreateBitCast(Rep, CI->getType());
1896 } else if (IsX86 && Name == "avx512.kxor.w") {
1897 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1898 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1899 Rep = Builder.CreateXor(LHS, RHS);
1900 Rep = Builder.CreateBitCast(Rep, CI->getType());
1901 } else if (IsX86 && Name == "avx512.kxnor.w") {
1902 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1903 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1904 LHS = Builder.CreateNot(LHS);
1905 Rep = Builder.CreateXor(LHS, RHS);
1906 Rep = Builder.CreateBitCast(Rep, CI->getType());
1907 } else if (IsX86 && Name == "avx512.knot.w") {
1908 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1909 Rep = Builder.CreateNot(Rep);
1910 Rep = Builder.CreateBitCast(Rep, CI->getType());
1911 } else if (IsX86 &&
1912 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) {
1913 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16);
1914 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16);
1915 Rep = Builder.CreateOr(LHS, RHS);
1916 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty());
1917 Value *C;
1918 if (Name[14] == 'c')
1919 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty());
1920 else
1921 C = ConstantInt::getNullValue(Builder.getInt16Ty());
1922 Rep =
Builder.CreateICmpEQ(Rep, C); 1923 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty()); 1924 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" || 1925 Name == "sse.sub.ss" || Name == "sse2.sub.sd" || 1926 Name == "sse.mul.ss" || Name == "sse2.mul.sd" || 1927 Name == "sse.div.ss" || Name == "sse2.div.sd")) { 1928 Type *I32Ty = Type::getInt32Ty(C); 1929 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1930 ConstantInt::get(I32Ty, 0)); 1931 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1932 ConstantInt::get(I32Ty, 0)); 1933 Value *EltOp; 1934 if (Name.contains(".add.")) 1935 EltOp = Builder.CreateFAdd(Elt0, Elt1); 1936 else if (Name.contains(".sub.")) 1937 EltOp = Builder.CreateFSub(Elt0, Elt1); 1938 else if (Name.contains(".mul.")) 1939 EltOp = Builder.CreateFMul(Elt0, Elt1); 1940 else 1941 EltOp = Builder.CreateFDiv(Elt0, Elt1); 1942 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp, 1943 ConstantInt::get(I32Ty, 0)); 1944 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) { 1945 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt." 1946 bool CmpEq = Name[16] == 'e'; 1947 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 0 : 6, true); 1948 } else if (IsX86 && Name.startswith("avx512.mask.vpshufbitqmb.")) { 1949 Type *OpTy = CI->getArgOperand(0)->getType(); 1950 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1951 Intrinsic::ID IID; 1952 switch (VecWidth) { 1953 default: llvm_unreachable("Unexpected intrinsic"); 1954 case 128: IID = Intrinsic::x86_avx512_vpshufbitqmb_128; break; 1955 case 256: IID = Intrinsic::x86_avx512_vpshufbitqmb_256; break; 1956 case 512: IID = Intrinsic::x86_avx512_vpshufbitqmb_512; break; 1957 } 1958 1959 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1960 { CI->getOperand(0), CI->getArgOperand(1) }); 1961 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1962 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) { 1963 Type *OpTy = CI->getArgOperand(0)->getType(); 1964 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1965 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1966 Intrinsic::ID IID; 1967 if (VecWidth == 128 && EltWidth == 32) 1968 IID = Intrinsic::x86_avx512_fpclass_ps_128; 1969 else if (VecWidth == 256 && EltWidth == 32) 1970 IID = Intrinsic::x86_avx512_fpclass_ps_256; 1971 else if (VecWidth == 512 && EltWidth == 32) 1972 IID = Intrinsic::x86_avx512_fpclass_ps_512; 1973 else if (VecWidth == 128 && EltWidth == 64) 1974 IID = Intrinsic::x86_avx512_fpclass_pd_128; 1975 else if (VecWidth == 256 && EltWidth == 64) 1976 IID = Intrinsic::x86_avx512_fpclass_pd_256; 1977 else if (VecWidth == 512 && EltWidth == 64) 1978 IID = Intrinsic::x86_avx512_fpclass_pd_512; 1979 else 1980 llvm_unreachable("Unexpected intrinsic"); 1981 1982 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1983 { CI->getOperand(0), CI->getArgOperand(1) }); 1984 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1985 } else if (IsX86 && Name.startswith("avx512.mask.cmp.p")) { 1986 Type *OpTy = CI->getArgOperand(0)->getType(); 1987 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1988 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1989 Intrinsic::ID IID; 1990 if (VecWidth == 128 && EltWidth == 32) 1991 IID = Intrinsic::x86_avx512_cmp_ps_128; 1992 else if (VecWidth == 256 && EltWidth == 32) 1993 IID = Intrinsic::x86_avx512_cmp_ps_256; 1994 else if (VecWidth == 512 && EltWidth == 32) 1995 IID = Intrinsic::x86_avx512_cmp_ps_512; 1996 
else if (VecWidth == 128 && EltWidth == 64)
1997 IID = Intrinsic::x86_avx512_cmp_pd_128;
1998 else if (VecWidth == 256 && EltWidth == 64)
1999 IID = Intrinsic::x86_avx512_cmp_pd_256;
2000 else if (VecWidth == 512 && EltWidth == 64)
2001 IID = Intrinsic::x86_avx512_cmp_pd_512;
2002 else
2003 llvm_unreachable("Unexpected intrinsic");
2004
2005 SmallVector<Value *, 4> Args;
2006 Args.push_back(CI->getArgOperand(0));
2007 Args.push_back(CI->getArgOperand(1));
2008 Args.push_back(CI->getArgOperand(2));
2009 if (CI->getNumArgOperands() == 5)
2010 Args.push_back(CI->getArgOperand(4));
2011
2012 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
2013 Args);
2014 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(3));
2015 } else if (IsX86 && Name.startswith("avx512.mask.cmp.") &&
2016 Name[16] != 'p') {
2017 // Integer compare intrinsics.
2018 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2019 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true);
2020 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) {
2021 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2022 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false);
2023 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") ||
2024 Name.startswith("avx512.cvtw2mask.") ||
2025 Name.startswith("avx512.cvtd2mask.") ||
2026 Name.startswith("avx512.cvtq2mask."))) {
2027 Value *Op = CI->getArgOperand(0);
2028 Value *Zero = llvm::Constant::getNullValue(Op->getType());
2029 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero);
2030 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr);
2031 } else if (IsX86 && (Name == "ssse3.pabs.b.128" ||
2032 Name == "ssse3.pabs.w.128" ||
2033 Name == "ssse3.pabs.d.128" ||
2034 Name.startswith("avx2.pabs") ||
2035 Name.startswith("avx512.mask.pabs"))) {
2036 Rep = upgradeAbs(Builder, *CI);
2037 } else if (IsX86 && (Name == "sse41.pmaxsb" ||
2038 Name == "sse2.pmaxs.w" ||
2039 Name == "sse41.pmaxsd" ||
2040 Name.startswith("avx2.pmaxs") ||
2041 Name.startswith("avx512.mask.pmaxs"))) {
2042 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT);
2043 } else if (IsX86 && (Name == "sse2.pmaxu.b" ||
2044 Name == "sse41.pmaxuw" ||
2045 Name == "sse41.pmaxud" ||
2046 Name.startswith("avx2.pmaxu") ||
2047 Name.startswith("avx512.mask.pmaxu"))) {
2048 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT);
2049 } else if (IsX86 && (Name == "sse41.pminsb" ||
2050 Name == "sse2.pmins.w" ||
2051 Name == "sse41.pminsd" ||
2052 Name.startswith("avx2.pmins") ||
2053 Name.startswith("avx512.mask.pmins"))) {
2054 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT);
2055 } else if (IsX86 && (Name == "sse2.pminu.b" ||
2056 Name == "sse41.pminuw" ||
2057 Name == "sse41.pminud" ||
2058 Name.startswith("avx2.pminu") ||
2059 Name.startswith("avx512.mask.pminu"))) {
2060 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT);
2061 } else if (IsX86 && (Name == "sse2.pmulu.dq" ||
2062 Name == "avx2.pmulu.dq" ||
2063 Name == "avx512.pmulu.dq.512" ||
2064 Name.startswith("avx512.mask.pmulu.dq."))) {
2065 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false);
2066 } else if (IsX86 && (Name == "sse41.pmuldq" ||
2067 Name == "avx2.pmul.dq" ||
2068 Name == "avx512.pmul.dq.512" ||
2069 Name.startswith("avx512.mask.pmul.dq."))) {
2070 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true);
2071 } else if (IsX86 && (Name == "sse.cvtsi2ss" ||
2072 Name == "sse2.cvtsi2sd" ||
2073 Name == "sse.cvtsi642ss" ||
2074 Name == "sse2.cvtsi642sd")) {
2075 Rep =
Builder.CreateSIToFP(CI->getArgOperand(1), 2076 CI->getType()->getVectorElementType()); 2077 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 2078 } else if (IsX86 && Name == "avx512.cvtusi2sd") { 2079 Rep = Builder.CreateUIToFP(CI->getArgOperand(1), 2080 CI->getType()->getVectorElementType()); 2081 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 2082 } else if (IsX86 && Name == "sse2.cvtss2sd") { 2083 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0); 2084 Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType()); 2085 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 2086 } else if (IsX86 && (Name == "sse2.cvtdq2pd" || 2087 Name == "sse2.cvtdq2ps" || 2088 Name == "avx.cvtdq2.pd.256" || 2089 Name == "avx.cvtdq2.ps.256" || 2090 Name.startswith("avx512.mask.cvtdq2pd.") || 2091 Name.startswith("avx512.mask.cvtudq2pd.") || 2092 Name.startswith("avx512.mask.cvtdq2ps.") || 2093 Name.startswith("avx512.mask.cvtudq2ps.") || 2094 Name.startswith("avx512.mask.cvtqq2pd.") || 2095 Name.startswith("avx512.mask.cvtuqq2pd.") || 2096 Name == "avx512.mask.cvtqq2ps.256" || 2097 Name == "avx512.mask.cvtqq2ps.512" || 2098 Name == "avx512.mask.cvtuqq2ps.256" || 2099 Name == "avx512.mask.cvtuqq2ps.512" || 2100 Name == "sse2.cvtps2pd" || 2101 Name == "avx.cvt.ps2.pd.256" || 2102 Name == "avx512.mask.cvtps2pd.128" || 2103 Name == "avx512.mask.cvtps2pd.256")) { 2104 Type *DstTy = CI->getType(); 2105 Rep = CI->getArgOperand(0); 2106 Type *SrcTy = Rep->getType(); 2107 2108 unsigned NumDstElts = DstTy->getVectorNumElements(); 2109 if (NumDstElts < SrcTy->getVectorNumElements()) { 2110 assert(NumDstElts == 2 && "Unexpected vector size"); 2111 uint32_t ShuffleMask[2] = { 0, 1 }; 2112 Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); 2113 } 2114 2115 bool IsPS2PD = SrcTy->getVectorElementType()->isFloatTy(); 2116 bool IsUnsigned = (StringRef::npos != Name.find("cvtu")); 2117 if (IsPS2PD) 2118 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd"); 2119 else if (CI->getNumArgOperands() == 4 && 2120 (!isa<ConstantInt>(CI->getArgOperand(3)) || 2121 cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) { 2122 Intrinsic::ID IID = IsUnsigned ? Intrinsic::x86_avx512_uitofp_round 2123 : Intrinsic::x86_avx512_sitofp_round; 2124 Function *F = Intrinsic::getDeclaration(CI->getModule(), IID, 2125 { DstTy, SrcTy }); 2126 Rep = Builder.CreateCall(F, { Rep, CI->getArgOperand(3) }); 2127 } else { 2128 Rep = IsUnsigned ? Builder.CreateUIToFP(Rep, DstTy, "cvt") 2129 : Builder.CreateSIToFP(Rep, DstTy, "cvt"); 2130 } 2131 2132 if (CI->getNumArgOperands() >= 3) 2133 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2134 CI->getArgOperand(1)); 2135 } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) { 2136 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 2137 CI->getArgOperand(1), CI->getArgOperand(2), 2138 /*Aligned*/false); 2139 } else if (IsX86 && (Name.startswith("avx512.mask.load."))) { 2140 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 2141 CI->getArgOperand(1),CI->getArgOperand(2), 2142 /*Aligned*/true); 2143 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) { 2144 Type *ResultTy = CI->getType(); 2145 Type *PtrTy = ResultTy->getVectorElementType(); 2146 2147 // Cast the pointer to element type. 
2148 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2149 llvm::PointerType::getUnqual(PtrTy)); 2150 2151 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2152 ResultTy->getVectorNumElements()); 2153 2154 Function *ELd = Intrinsic::getDeclaration(F->getParent(), 2155 Intrinsic::masked_expandload, 2156 ResultTy); 2157 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) }); 2158 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) { 2159 Type *ResultTy = CI->getArgOperand(1)->getType(); 2160 Type *PtrTy = ResultTy->getVectorElementType(); 2161 2162 // Cast the pointer to element type. 2163 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 2164 llvm::PointerType::getUnqual(PtrTy)); 2165 2166 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2167 ResultTy->getVectorNumElements()); 2168 2169 Function *CSt = Intrinsic::getDeclaration(F->getParent(), 2170 Intrinsic::masked_compressstore, 2171 ResultTy); 2172 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec }); 2173 } else if (IsX86 && (Name.startswith("avx512.mask.compress.") || 2174 Name.startswith("avx512.mask.expand."))) { 2175 Type *ResultTy = CI->getType(); 2176 2177 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 2178 ResultTy->getVectorNumElements()); 2179 2180 bool IsCompress = Name[12] == 'c'; 2181 Intrinsic::ID IID = IsCompress ? Intrinsic::x86_avx512_mask_compress 2182 : Intrinsic::x86_avx512_mask_expand; 2183 Function *Intr = Intrinsic::getDeclaration(F->getParent(), IID, ResultTy); 2184 Rep = Builder.CreateCall(Intr, { CI->getOperand(0), CI->getOperand(1), 2185 MaskVec }); 2186 } else if (IsX86 && Name.startswith("xop.vpcom")) { 2187 bool IsSigned; 2188 if (Name.endswith("ub") || Name.endswith("uw") || Name.endswith("ud") || 2189 Name.endswith("uq")) 2190 IsSigned = false; 2191 else if (Name.endswith("b") || Name.endswith("w") || Name.endswith("d") || 2192 Name.endswith("q")) 2193 IsSigned = true; 2194 else 2195 llvm_unreachable("Unknown suffix"); 2196 2197 unsigned Imm; 2198 if (CI->getNumArgOperands() == 3) { 2199 Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2200 } else { 2201 Name = Name.substr(9); // strip off "xop.vpcom" 2202 if (Name.startswith("lt")) 2203 Imm = 0; 2204 else if (Name.startswith("le")) 2205 Imm = 1; 2206 else if (Name.startswith("gt")) 2207 Imm = 2; 2208 else if (Name.startswith("ge")) 2209 Imm = 3; 2210 else if (Name.startswith("eq")) 2211 Imm = 4; 2212 else if (Name.startswith("ne")) 2213 Imm = 5; 2214 else if (Name.startswith("false")) 2215 Imm = 6; 2216 else if (Name.startswith("true")) 2217 Imm = 7; 2218 else 2219 llvm_unreachable("Unknown condition"); 2220 } 2221 2222 Rep = upgradeX86vpcom(Builder, *CI, Imm, IsSigned); 2223 } else if (IsX86 && Name.startswith("xop.vpcmov")) { 2224 Value *Sel = CI->getArgOperand(2); 2225 Value *NotSel = Builder.CreateNot(Sel); 2226 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel); 2227 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel); 2228 Rep = Builder.CreateOr(Sel0, Sel1); 2229 } else if (IsX86 && (Name.startswith("xop.vprot") || 2230 Name.startswith("avx512.prol") || 2231 Name.startswith("avx512.mask.prol"))) { 2232 Rep = upgradeX86Rotate(Builder, *CI, false); 2233 } else if (IsX86 && (Name.startswith("avx512.pror") || 2234 Name.startswith("avx512.mask.pror"))) { 2235 Rep = upgradeX86Rotate(Builder, *CI, true); 2236 } else if (IsX86 && (Name.startswith("avx512.vpshld.") || 2237 Name.startswith("avx512.mask.vpshld") || 2238 
Name.startswith("avx512.maskz.vpshld"))) {
2239 bool ZeroMask = Name[11] == 'z';
2240 Rep = upgradeX86ConcatShift(Builder, *CI, false, ZeroMask);
2241 } else if (IsX86 && (Name.startswith("avx512.vpshrd.") ||
2242 Name.startswith("avx512.mask.vpshrd") ||
2243 Name.startswith("avx512.maskz.vpshrd"))) {
2244 bool ZeroMask = Name[11] == 'z';
2245 Rep = upgradeX86ConcatShift(Builder, *CI, true, ZeroMask);
2246 } else if (IsX86 && Name == "sse42.crc32.64.8") {
2247 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(),
2248 Intrinsic::x86_sse42_crc32_32_8);
2249 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C));
2250 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)});
2251 Rep = Builder.CreateZExt(Rep, CI->getType(), "");
2252 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") ||
2253 Name.startswith("avx512.vbroadcast.s"))) {
2254 // Replace broadcasts with a series of insertelements.
2255 Type *VecTy = CI->getType();
2256 Type *EltTy = VecTy->getVectorElementType();
2257 unsigned EltNum = VecTy->getVectorNumElements();
2258 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0),
2259 EltTy->getPointerTo());
2260 Value *Load = Builder.CreateLoad(EltTy, Cast);
2261 Type *I32Ty = Type::getInt32Ty(C);
2262 Rep = UndefValue::get(VecTy);
2263 for (unsigned I = 0; I < EltNum; ++I)
2264 Rep = Builder.CreateInsertElement(Rep, Load,
2265 ConstantInt::get(I32Ty, I));
2266 } else if (IsX86 && (Name.startswith("sse41.pmovsx") ||
2267 Name.startswith("sse41.pmovzx") ||
2268 Name.startswith("avx2.pmovsx") ||
2269 Name.startswith("avx2.pmovzx") ||
2270 Name.startswith("avx512.mask.pmovsx") ||
2271 Name.startswith("avx512.mask.pmovzx"))) {
2272 VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType());
2273 VectorType *DstTy = cast<VectorType>(CI->getType());
2274 unsigned NumDstElts = DstTy->getNumElements();
2275
2276 // Extract a subvector of the first NumDstElts lanes and sign/zero extend.
2277 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
2278 for (unsigned i = 0; i != NumDstElts; ++i)
2279 ShuffleMask[i] = i;
2280
2281 Value *SV = Builder.CreateShuffleVector(
2282 CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask);
2283
2284 bool DoSext = (StringRef::npos != Name.find("pmovsx"));
2285 Rep = DoSext ? Builder.CreateSExt(SV, DstTy)
2286 : Builder.CreateZExt(SV, DstTy);
2287 // If there are 3 arguments, it's a masked intrinsic so we need a select.
2288 if (CI->getNumArgOperands() == 3)
2289 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2290 CI->getArgOperand(1));
2291 } else if (IsX86 && (Name == "avx512.mask.pmov.qd.256" ||
2292 Name == "avx512.mask.pmov.qd.512" ||
2293 Name == "avx512.mask.pmov.wb.256" ||
2294 Name == "avx512.mask.pmov.wb.512")) {
2295 Type *Ty = CI->getArgOperand(1)->getType();
2296 Rep = Builder.CreateTrunc(CI->getArgOperand(0), Ty);
2297 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2298 CI->getArgOperand(1));
2299 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") ||
2300 Name == "avx2.vbroadcasti128")) {
2301 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle.
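// For example, with a <8 x float> result this loads <4 x float> from the
// pointer operand and repeats it in both 128-bit halves with the shuffle
// mask <0, 1, 2, 3, 0, 1, 2, 3> (a sketch; the code below derives the
// element count from the result type).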
2302 Type *EltTy = CI->getType()->getVectorElementType();
2303 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits();
2304 Type *VT = VectorType::get(EltTy, NumSrcElts);
2305 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0),
2306 PointerType::getUnqual(VT));
2307 Value *Load = Builder.CreateAlignedLoad(VT, Op, 1);
2308 if (NumSrcElts == 2)
2309 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
2310 { 0, 1, 0, 1 });
2311 else
2312 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()),
2313 { 0, 1, 2, 3, 0, 1, 2, 3 });
2314 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") ||
2315 Name.startswith("avx512.mask.shuf.f"))) {
2316 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2317 Type *VT = CI->getType();
2318 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128;
2319 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits();
2320 unsigned ControlBitsMask = NumLanes - 1;
2321 unsigned NumControlBits = NumLanes / 2;
2322 SmallVector<uint32_t, 8> ShuffleMask(0);
2323
2324 for (unsigned l = 0; l != NumLanes; ++l) {
2325 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask;
2326 // We actually need the other source.
2327 if (l >= NumLanes / 2)
2328 LaneMask += NumLanes;
2329 for (unsigned i = 0; i != NumElementsInLane; ++i)
2330 ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
2331 }
2332 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2333 CI->getArgOperand(1), ShuffleMask);
2334 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2335 CI->getArgOperand(3));
2336 } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
2337 Name.startswith("avx512.mask.broadcasti"))) {
2338 unsigned NumSrcElts =
2339 CI->getArgOperand(0)->getType()->getVectorNumElements();
2340 unsigned NumDstElts = CI->getType()->getVectorNumElements();
2341
2342 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
2343 for (unsigned i = 0; i != NumDstElts; ++i)
2344 ShuffleMask[i] = i % NumSrcElts;
2345
2346 Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
2347 CI->getArgOperand(0),
2348 ShuffleMask);
2349 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
2350 CI->getArgOperand(1));
2351 } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
2352 Name.startswith("avx2.vbroadcast") ||
2353 Name.startswith("avx512.pbroadcast") ||
2354 Name.startswith("avx512.mask.broadcast.s"))) {
2355 // Replace vp?broadcasts with a vector shuffle.
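// For example, the broadcast becomes
//   shufflevector %op, undef, zeroinitializer
// i.e. an all-zero shuffle mask that repeats element 0 of %op across every
// lane of the result.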
2356 Value *Op = CI->getArgOperand(0); 2357 unsigned NumElts = CI->getType()->getVectorNumElements(); 2358 Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts); 2359 Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()), 2360 Constant::getNullValue(MaskTy)); 2361 2362 if (CI->getNumArgOperands() == 3) 2363 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2364 CI->getArgOperand(1)); 2365 } else if (IsX86 && (Name.startswith("sse2.padds.") || 2366 Name.startswith("sse2.psubs.") || 2367 Name.startswith("avx2.padds.") || 2368 Name.startswith("avx2.psubs.") || 2369 Name.startswith("avx512.padds.") || 2370 Name.startswith("avx512.psubs.") || 2371 Name.startswith("avx512.mask.padds.") || 2372 Name.startswith("avx512.mask.psubs."))) { 2373 bool IsAdd = Name.contains(".padds"); 2374 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, true, IsAdd); 2375 } else if (IsX86 && (Name.startswith("sse2.paddus.") || 2376 Name.startswith("sse2.psubus.") || 2377 Name.startswith("avx2.paddus.") || 2378 Name.startswith("avx2.psubus.") || 2379 Name.startswith("avx512.mask.paddus.") || 2380 Name.startswith("avx512.mask.psubus."))) { 2381 bool IsAdd = Name.contains(".paddus"); 2382 Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, false, IsAdd); 2383 } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) { 2384 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2385 CI->getArgOperand(1), 2386 CI->getArgOperand(2), 2387 CI->getArgOperand(3), 2388 CI->getArgOperand(4), 2389 false); 2390 } else if (IsX86 && Name.startswith("avx512.mask.valign.")) { 2391 Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0), 2392 CI->getArgOperand(1), 2393 CI->getArgOperand(2), 2394 CI->getArgOperand(3), 2395 CI->getArgOperand(4), 2396 true); 2397 } else if (IsX86 && (Name == "sse2.psll.dq" || 2398 Name == "avx2.psll.dq")) { 2399 // 128/256-bit shift left specified in bits. 2400 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2401 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), 2402 Shift / 8); // Shift is in bits. 2403 } else if (IsX86 && (Name == "sse2.psrl.dq" || 2404 Name == "avx2.psrl.dq")) { 2405 // 128/256-bit shift right specified in bits. 2406 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2407 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), 2408 Shift / 8); // Shift is in bits. 2409 } else if (IsX86 && (Name == "sse2.psll.dq.bs" || 2410 Name == "avx2.psll.dq.bs" || 2411 Name == "avx512.psll.dq.512")) { 2412 // 128/256/512-bit shift left specified in bytes. 2413 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2414 Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift); 2415 } else if (IsX86 && (Name == "sse2.psrl.dq.bs" || 2416 Name == "avx2.psrl.dq.bs" || 2417 Name == "avx512.psrl.dq.512")) { 2418 // 128/256/512-bit shift right specified in bytes. 
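// For example, an immediate of 3 moves the whole vector right by 3 bytes;
// unlike the ".dq" variants above, no division of the shift amount by 8 is
// needed here.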
2419 unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2420 Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
2421 } else if (IsX86 && (Name == "sse41.pblendw" ||
2422 Name.startswith("sse41.blendp") ||
2423 Name.startswith("avx.blend.p") ||
2424 Name == "avx2.pblendw" ||
2425 Name.startswith("avx2.pblendd."))) {
2426 Value *Op0 = CI->getArgOperand(0);
2427 Value *Op1 = CI->getArgOperand(1);
2428 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2429 VectorType *VecTy = cast<VectorType>(CI->getType());
2430 unsigned NumElts = VecTy->getNumElements();
2431
2432 SmallVector<uint32_t, 16> Idxs(NumElts);
2433 for (unsigned i = 0; i != NumElts; ++i)
2434 Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;
2435
2436 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
2437 } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
2438 Name == "avx2.vinserti128" ||
2439 Name.startswith("avx512.mask.insert"))) {
2440 Value *Op0 = CI->getArgOperand(0);
2441 Value *Op1 = CI->getArgOperand(1);
2442 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2443 unsigned DstNumElts = CI->getType()->getVectorNumElements();
2444 unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
2445 unsigned Scale = DstNumElts / SrcNumElts;
2446
2447 // Mask off the high bits of the immediate value; hardware ignores those.
2448 Imm = Imm % Scale;
2449
2450 // Extend the second operand into a vector the size of the destination.
2451 Value *UndefV = UndefValue::get(Op1->getType());
2452 SmallVector<uint32_t, 8> Idxs(DstNumElts);
2453 for (unsigned i = 0; i != SrcNumElts; ++i)
2454 Idxs[i] = i;
2455 for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
2456 Idxs[i] = SrcNumElts;
2457 Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);
2458
2459 // Insert the second operand into the first operand.
2460
2461 // Note that there is no guarantee that instruction lowering will actually
2462 // produce a vinsertf128 instruction for the created shuffles. In
2463 // particular, the 0 immediate case involves no lane changes, so it can
2464 // be handled as a blend.
2465
2466 // Example of shuffle mask for 32-bit elements:
2467 // Imm = 1 <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
2468 // Imm = 0 <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >
2469
2470 // First fill with identity mask.
2471 for (unsigned i = 0; i != DstNumElts; ++i)
2472 Idxs[i] = i;
2473 // Then replace the elements where we need to insert.
2474 for (unsigned i = 0; i != SrcNumElts; ++i)
2475 Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
2476 Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);
2477
2478 // If the intrinsic has a mask operand, handle that.
2479 if (CI->getNumArgOperands() == 5)
2480 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
2481 CI->getArgOperand(3));
2482 } else if (IsX86 && (Name.startswith("avx.vextractf128.") ||
2483 Name == "avx2.vextracti128" ||
2484 Name.startswith("avx512.mask.vextract"))) {
2485 Value *Op0 = CI->getArgOperand(0);
2486 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2487 unsigned DstNumElts = CI->getType()->getVectorNumElements();
2488 unsigned SrcNumElts = Op0->getType()->getVectorNumElements();
2489 unsigned Scale = SrcNumElts / DstNumElts;
2490
2491 // Mask off the high bits of the immediate value; hardware ignores those.
2492 Imm = Imm % Scale;
2493
2494 // Get indexes for the subvector of the input vector.
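// For example, extracting the upper half (Imm = 1) of a <8 x i32> source
// into a <4 x i32> result uses the indices <4, 5, 6, 7>.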
2495 SmallVector<uint32_t, 8> Idxs(DstNumElts); 2496 for (unsigned i = 0; i != DstNumElts; ++i) { 2497 Idxs[i] = i + (Imm * DstNumElts); 2498 } 2499 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2500 2501 // If the intrinsic has a mask operand, handle that. 2502 if (CI->getNumArgOperands() == 4) 2503 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2504 CI->getArgOperand(2)); 2505 } else if (!IsX86 && Name == "stackprotectorcheck") { 2506 Rep = nullptr; 2507 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") || 2508 Name.startswith("avx512.mask.perm.di."))) { 2509 Value *Op0 = CI->getArgOperand(0); 2510 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2511 VectorType *VecTy = cast<VectorType>(CI->getType()); 2512 unsigned NumElts = VecTy->getNumElements(); 2513 2514 SmallVector<uint32_t, 8> Idxs(NumElts); 2515 for (unsigned i = 0; i != NumElts; ++i) 2516 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3); 2517 2518 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2519 2520 if (CI->getNumArgOperands() == 4) 2521 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2522 CI->getArgOperand(2)); 2523 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") || 2524 Name == "avx2.vperm2i128")) { 2525 // The immediate permute control byte looks like this: 2526 // [1:0] - select 128 bits from sources for low half of destination 2527 // [2] - ignore 2528 // [3] - zero low half of destination 2529 // [5:4] - select 128 bits from sources for high half of destination 2530 // [6] - ignore 2531 // [7] - zero high half of destination 2532 2533 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2534 2535 unsigned NumElts = CI->getType()->getVectorNumElements(); 2536 unsigned HalfSize = NumElts / 2; 2537 SmallVector<uint32_t, 8> ShuffleMask(NumElts); 2538 2539 // Determine which operand(s) are actually in use for this instruction. 2540 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2541 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2542 2543 // If needed, replace operands based on zero mask. 2544 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0; 2545 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1; 2546 2547 // Permute low half of result. 2548 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0; 2549 for (unsigned i = 0; i < HalfSize; ++i) 2550 ShuffleMask[i] = StartIndex + i; 2551 2552 // Permute high half of result. 2553 StartIndex = (Imm & 0x10) ? HalfSize : 0; 2554 for (unsigned i = 0; i < HalfSize; ++i) 2555 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i; 2556 2557 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2558 2559 } else if (IsX86 && (Name.startswith("avx.vpermil.") || 2560 Name == "sse2.pshuf.d" || 2561 Name.startswith("avx512.mask.vpermil.p") || 2562 Name.startswith("avx512.mask.pshuf.d."))) { 2563 Value *Op0 = CI->getArgOperand(0); 2564 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2565 VectorType *VecTy = cast<VectorType>(CI->getType()); 2566 unsigned NumElts = VecTy->getNumElements(); 2567 // Calculate the size of each index in the immediate. 2568 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits(); 2569 unsigned IdxMask = ((1 << IdxSize) - 1); 2570 2571 SmallVector<uint32_t, 8> Idxs(NumElts); 2572 // Lookup the bits for this element, wrapping around the immediate every 2573 // 8-bits. 
Elements are grouped into sets of 2 or 4 elements so we need
2574 // to offset by the first index of each group.
2575 for (unsigned i = 0; i != NumElts; ++i)
2576 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask);
2577
2578 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2579
2580 if (CI->getNumArgOperands() == 4)
2581 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2582 CI->getArgOperand(2));
2583 } else if (IsX86 && (Name == "sse2.pshufl.w" ||
2584 Name.startswith("avx512.mask.pshufl.w."))) {
2585 Value *Op0 = CI->getArgOperand(0);
2586 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2587 unsigned NumElts = CI->getType()->getVectorNumElements();
2588
2589 SmallVector<uint32_t, 16> Idxs(NumElts);
2590 for (unsigned l = 0; l != NumElts; l += 8) {
2591 for (unsigned i = 0; i != 4; ++i)
2592 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l;
2593 for (unsigned i = 4; i != 8; ++i)
2594 Idxs[i + l] = i + l;
2595 }
2596
2597 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2598
2599 if (CI->getNumArgOperands() == 4)
2600 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2601 CI->getArgOperand(2));
2602 } else if (IsX86 && (Name == "sse2.pshufh.w" ||
2603 Name.startswith("avx512.mask.pshufh.w."))) {
2604 Value *Op0 = CI->getArgOperand(0);
2605 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
2606 unsigned NumElts = CI->getType()->getVectorNumElements();
2607
2608 SmallVector<uint32_t, 16> Idxs(NumElts);
2609 for (unsigned l = 0; l != NumElts; l += 8) {
2610 for (unsigned i = 0; i != 4; ++i)
2611 Idxs[i + l] = i + l;
2612 for (unsigned i = 0; i != 4; ++i)
2613 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l;
2614 }
2615
2616 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs);
2617
2618 if (CI->getNumArgOperands() == 4)
2619 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
2620 CI->getArgOperand(2));
2621 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) {
2622 Value *Op0 = CI->getArgOperand(0);
2623 Value *Op1 = CI->getArgOperand(1);
2624 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
2625 unsigned NumElts = CI->getType()->getVectorNumElements();
2626
2627 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits();
2628 unsigned HalfLaneElts = NumLaneElts / 2;
2629
2630 SmallVector<uint32_t, 16> Idxs(NumElts);
2631 for (unsigned i = 0; i != NumElts; ++i) {
2632 // Base index is the starting element of the lane.
2633 Idxs[i] = i - (i % NumLaneElts);
2634 // If we are halfway through the lane, switch to the other source.
2635 if ((i % NumLaneElts) >= HalfLaneElts)
2636 Idxs[i] += NumElts;
2637 // Now select the specific element by adding HalfLaneElts bits from
2638 // the immediate, wrapping around the immediate every 8-bits.
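// For example (worked by hand, not from a test): a v4f32 shuf.ps with
// Imm = 0x1B produces the mask <3, 2, 5, 4>: the low half takes elements
// 3 and 2 of the first source, the high half takes elements 1 and 0 of
// the second source.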
2639 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1); 2640 } 2641 2642 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2643 2644 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2645 CI->getArgOperand(3)); 2646 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") || 2647 Name.startswith("avx512.mask.movshdup") || 2648 Name.startswith("avx512.mask.movsldup"))) { 2649 Value *Op0 = CI->getArgOperand(0); 2650 unsigned NumElts = CI->getType()->getVectorNumElements(); 2651 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2652 2653 unsigned Offset = 0; 2654 if (Name.startswith("avx512.mask.movshdup.")) 2655 Offset = 1; 2656 2657 SmallVector<uint32_t, 16> Idxs(NumElts); 2658 for (unsigned l = 0; l != NumElts; l += NumLaneElts) 2659 for (unsigned i = 0; i != NumLaneElts; i += 2) { 2660 Idxs[i + l + 0] = i + l + Offset; 2661 Idxs[i + l + 1] = i + l + Offset; 2662 } 2663 2664 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2665 2666 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2667 CI->getArgOperand(1)); 2668 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") || 2669 Name.startswith("avx512.mask.unpckl."))) { 2670 Value *Op0 = CI->getArgOperand(0); 2671 Value *Op1 = CI->getArgOperand(1); 2672 int NumElts = CI->getType()->getVectorNumElements(); 2673 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2674 2675 SmallVector<uint32_t, 64> Idxs(NumElts); 2676 for (int l = 0; l != NumElts; l += NumLaneElts) 2677 for (int i = 0; i != NumLaneElts; ++i) 2678 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2); 2679 2680 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2681 2682 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2683 CI->getArgOperand(2)); 2684 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") || 2685 Name.startswith("avx512.mask.unpckh."))) { 2686 Value *Op0 = CI->getArgOperand(0); 2687 Value *Op1 = CI->getArgOperand(1); 2688 int NumElts = CI->getType()->getVectorNumElements(); 2689 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2690 2691 SmallVector<uint32_t, 64> Idxs(NumElts); 2692 for (int l = 0; l != NumElts; l += NumLaneElts) 2693 for (int i = 0; i != NumLaneElts; ++i) 2694 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2); 2695 2696 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2697 2698 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2699 CI->getArgOperand(2)); 2700 } else if (IsX86 && (Name.startswith("avx512.mask.and.") || 2701 Name.startswith("avx512.mask.pand."))) { 2702 VectorType *FTy = cast<VectorType>(CI->getType()); 2703 VectorType *ITy = VectorType::getInteger(FTy); 2704 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2705 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2706 Rep = Builder.CreateBitCast(Rep, FTy); 2707 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2708 CI->getArgOperand(2)); 2709 } else if (IsX86 && (Name.startswith("avx512.mask.andn.") || 2710 Name.startswith("avx512.mask.pandn."))) { 2711 VectorType *FTy = cast<VectorType>(CI->getType()); 2712 VectorType *ITy = VectorType::getInteger(FTy); 2713 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy)); 2714 Rep = Builder.CreateAnd(Rep, 2715 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2716 Rep = Builder.CreateBitCast(Rep, FTy); 2717 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2718 CI->getArgOperand(2)); 2719 } else if (IsX86 && (Name.startswith("avx512.mask.or.") || 2720 
Name.startswith("avx512.mask.por."))) { 2721 VectorType *FTy = cast<VectorType>(CI->getType()); 2722 VectorType *ITy = VectorType::getInteger(FTy); 2723 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2724 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2725 Rep = Builder.CreateBitCast(Rep, FTy); 2726 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2727 CI->getArgOperand(2)); 2728 } else if (IsX86 && (Name.startswith("avx512.mask.xor.") || 2729 Name.startswith("avx512.mask.pxor."))) { 2730 VectorType *FTy = cast<VectorType>(CI->getType()); 2731 VectorType *ITy = VectorType::getInteger(FTy); 2732 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2733 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2734 Rep = Builder.CreateBitCast(Rep, FTy); 2735 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2736 CI->getArgOperand(2)); 2737 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) { 2738 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2739 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2740 CI->getArgOperand(2)); 2741 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) { 2742 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2743 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2744 CI->getArgOperand(2)); 2745 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) { 2746 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2747 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2748 CI->getArgOperand(2)); 2749 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) { 2750 if (Name.endswith(".512")) { 2751 Intrinsic::ID IID; 2752 if (Name[17] == 's') 2753 IID = Intrinsic::x86_avx512_add_ps_512; 2754 else 2755 IID = Intrinsic::x86_avx512_add_pd_512; 2756 2757 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2758 { CI->getArgOperand(0), CI->getArgOperand(1), 2759 CI->getArgOperand(4) }); 2760 } else { 2761 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2762 } 2763 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2764 CI->getArgOperand(2)); 2765 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) { 2766 if (Name.endswith(".512")) { 2767 Intrinsic::ID IID; 2768 if (Name[17] == 's') 2769 IID = Intrinsic::x86_avx512_div_ps_512; 2770 else 2771 IID = Intrinsic::x86_avx512_div_pd_512; 2772 2773 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2774 { CI->getArgOperand(0), CI->getArgOperand(1), 2775 CI->getArgOperand(4) }); 2776 } else { 2777 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1)); 2778 } 2779 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2780 CI->getArgOperand(2)); 2781 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) { 2782 if (Name.endswith(".512")) { 2783 Intrinsic::ID IID; 2784 if (Name[17] == 's') 2785 IID = Intrinsic::x86_avx512_mul_ps_512; 2786 else 2787 IID = Intrinsic::x86_avx512_mul_pd_512; 2788 2789 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2790 { CI->getArgOperand(0), CI->getArgOperand(1), 2791 CI->getArgOperand(4) }); 2792 } else { 2793 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2794 } 2795 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2796 CI->getArgOperand(2)); 2797 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) { 2798 if (Name.endswith(".512")) { 2799 Intrinsic::ID IID; 2800 if (Name[17] == 's') 2801 IID = 
Intrinsic::x86_avx512_sub_ps_512; 2802 else 2803 IID = Intrinsic::x86_avx512_sub_pd_512; 2804 2805 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2806 { CI->getArgOperand(0), CI->getArgOperand(1), 2807 CI->getArgOperand(4) }); 2808 } else { 2809 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2810 } 2811 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2812 CI->getArgOperand(2)); 2813 } else if (IsX86 && (Name.startswith("avx512.mask.max.p") || 2814 Name.startswith("avx512.mask.min.p")) && 2815 Name.drop_front(18) == ".512") { 2816 bool IsDouble = Name[17] == 'd'; 2817 bool IsMin = Name[13] == 'i'; 2818 static const Intrinsic::ID MinMaxTbl[2][2] = { 2819 { Intrinsic::x86_avx512_max_ps_512, Intrinsic::x86_avx512_max_pd_512 }, 2820 { Intrinsic::x86_avx512_min_ps_512, Intrinsic::x86_avx512_min_pd_512 } 2821 }; 2822 Intrinsic::ID IID = MinMaxTbl[IsMin][IsDouble]; 2823 2824 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2825 { CI->getArgOperand(0), CI->getArgOperand(1), 2826 CI->getArgOperand(4) }); 2827 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2828 CI->getArgOperand(2)); 2829 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) { 2830 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 2831 Intrinsic::ctlz, 2832 CI->getType()), 2833 { CI->getArgOperand(0), Builder.getInt1(false) }); 2834 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2835 CI->getArgOperand(1)); 2836 } else if (IsX86 && Name.startswith("avx512.mask.psll")) { 2837 bool IsImmediate = Name[16] == 'i' || 2838 (Name.size() > 18 && Name[18] == 'i'); 2839 bool IsVariable = Name[16] == 'v'; 2840 char Size = Name[16] == '.' ? Name[17] : 2841 Name[17] == '.' ? Name[18] : 2842 Name[18] == '.' ? Name[19] : 2843 Name[20]; 2844 2845 Intrinsic::ID IID; 2846 if (IsVariable && Name[17] != '.') { 2847 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di 2848 IID = Intrinsic::x86_avx2_psllv_q; 2849 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di 2850 IID = Intrinsic::x86_avx2_psllv_q_256; 2851 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si 2852 IID = Intrinsic::x86_avx2_psllv_d; 2853 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si 2854 IID = Intrinsic::x86_avx2_psllv_d_256; 2855 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi 2856 IID = Intrinsic::x86_avx512_psllv_w_128; 2857 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi 2858 IID = Intrinsic::x86_avx512_psllv_w_256; 2859 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi 2860 IID = Intrinsic::x86_avx512_psllv_w_512; 2861 else 2862 llvm_unreachable("Unexpected size"); 2863 } else if (Name.endswith(".128")) { 2864 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128 2865 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d 2866 : Intrinsic::x86_sse2_psll_d; 2867 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128 2868 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q 2869 : Intrinsic::x86_sse2_psll_q; 2870 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128 2871 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w 2872 : Intrinsic::x86_sse2_psll_w; 2873 else 2874 llvm_unreachable("Unexpected size"); 2875 } else if (Name.endswith(".256")) { 2876 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256 2877 IID = IsImmediate ? 
Intrinsic::x86_avx2_pslli_d 2878 : Intrinsic::x86_avx2_psll_d; 2879 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256 2880 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q 2881 : Intrinsic::x86_avx2_psll_q; 2882 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256 2883 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w 2884 : Intrinsic::x86_avx2_psll_w; 2885 else 2886 llvm_unreachable("Unexpected size"); 2887 } else { 2888 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512 2889 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 : 2890 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 : 2891 Intrinsic::x86_avx512_psll_d_512; 2892 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512 2893 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 : 2894 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 : 2895 Intrinsic::x86_avx512_psll_q_512; 2896 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w 2897 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512 2898 : Intrinsic::x86_avx512_psll_w_512; 2899 else 2900 llvm_unreachable("Unexpected size"); 2901 } 2902 2903 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2904 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) { 2905 bool IsImmediate = Name[16] == 'i' || 2906 (Name.size() > 18 && Name[18] == 'i'); 2907 bool IsVariable = Name[16] == 'v'; 2908 char Size = Name[16] == '.' ? Name[17] : 2909 Name[17] == '.' ? Name[18] : 2910 Name[18] == '.' ? Name[19] : 2911 Name[20]; 2912 2913 Intrinsic::ID IID; 2914 if (IsVariable && Name[17] != '.') { 2915 if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di 2916 IID = Intrinsic::x86_avx2_psrlv_q; 2917 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di 2918 IID = Intrinsic::x86_avx2_psrlv_q_256; 2919 else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si 2920 IID = Intrinsic::x86_avx2_psrlv_d; 2921 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si 2922 IID = Intrinsic::x86_avx2_psrlv_d_256; 2923 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi 2924 IID = Intrinsic::x86_avx512_psrlv_w_128; 2925 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi 2926 IID = Intrinsic::x86_avx512_psrlv_w_256; 2927 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi 2928 IID = Intrinsic::x86_avx512_psrlv_w_512; 2929 else 2930 llvm_unreachable("Unexpected size"); 2931 } else if (Name.endswith(".128")) { 2932 if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128 2933 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d 2934 : Intrinsic::x86_sse2_psrl_d; 2935 else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128 2936 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q 2937 : Intrinsic::x86_sse2_psrl_q; 2938 else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128 2939 IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w 2940 : Intrinsic::x86_sse2_psrl_w; 2941 else 2942 llvm_unreachable("Unexpected size"); 2943 } else if (Name.endswith(".256")) { 2944 if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256 2945 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d 2946 : Intrinsic::x86_avx2_psrl_d; 2947 else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256 2948 IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q 2949 : Intrinsic::x86_avx2_psrl_q; 2950 else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256 2951 IID = IsImmediate ? 
Intrinsic::x86_avx2_psrli_w 2952 : Intrinsic::x86_avx2_psrl_w; 2953 else 2954 llvm_unreachable("Unexpected size"); 2955 } else { 2956 if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512 2957 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 : 2958 IsVariable ? Intrinsic::x86_avx512_psrlv_d_512 : 2959 Intrinsic::x86_avx512_psrl_d_512; 2960 else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512 2961 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 : 2962 IsVariable ? Intrinsic::x86_avx512_psrlv_q_512 : 2963 Intrinsic::x86_avx512_psrl_q_512; 2964 else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w) 2965 IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512 2966 : Intrinsic::x86_avx512_psrl_w_512; 2967 else 2968 llvm_unreachable("Unexpected size"); 2969 } 2970 2971 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2972 } else if (IsX86 && Name.startswith("avx512.mask.psra")) { 2973 bool IsImmediate = Name[16] == 'i' || 2974 (Name.size() > 18 && Name[18] == 'i'); 2975 bool IsVariable = Name[16] == 'v'; 2976 char Size = Name[16] == '.' ? Name[17] : 2977 Name[17] == '.' ? Name[18] : 2978 Name[18] == '.' ? Name[19] : 2979 Name[20]; 2980 2981 Intrinsic::ID IID; 2982 if (IsVariable && Name[17] != '.') { 2983 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si 2984 IID = Intrinsic::x86_avx2_psrav_d; 2985 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si 2986 IID = Intrinsic::x86_avx2_psrav_d_256; 2987 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi 2988 IID = Intrinsic::x86_avx512_psrav_w_128; 2989 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi 2990 IID = Intrinsic::x86_avx512_psrav_w_256; 2991 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi 2992 IID = Intrinsic::x86_avx512_psrav_w_512; 2993 else 2994 llvm_unreachable("Unexpected size"); 2995 } else if (Name.endswith(".128")) { 2996 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128 2997 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d 2998 : Intrinsic::x86_sse2_psra_d; 2999 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128 3000 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 : 3001 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 : 3002 Intrinsic::x86_avx512_psra_q_128; 3003 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128 3004 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w 3005 : Intrinsic::x86_sse2_psra_w; 3006 else 3007 llvm_unreachable("Unexpected size"); 3008 } else if (Name.endswith(".256")) { 3009 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256 3010 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d 3011 : Intrinsic::x86_avx2_psra_d; 3012 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256 3013 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 : 3014 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 : 3015 Intrinsic::x86_avx512_psra_q_256; 3016 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256 3017 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w 3018 : Intrinsic::x86_avx2_psra_w; 3019 else 3020 llvm_unreachable("Unexpected size"); 3021 } else { 3022 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512 3023 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 : 3024 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 : 3025 Intrinsic::x86_avx512_psra_d_512; 3026 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q 3027 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 : 3028 IsVariable ? 
Intrinsic::x86_avx512_psrav_q_512 : 3029 Intrinsic::x86_avx512_psra_q_512; 3030 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w 3031 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512 3032 : Intrinsic::x86_avx512_psra_w_512; 3033 else 3034 llvm_unreachable("Unexpected size"); 3035 } 3036 3037 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 3038 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) { 3039 Rep = upgradeMaskedMove(Builder, *CI); 3040 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) { 3041 Rep = UpgradeMaskToInt(Builder, *CI); 3042 } else if (IsX86 && Name.endswith(".movntdqa")) { 3043 Module *M = F->getParent(); 3044 MDNode *Node = MDNode::get( 3045 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 3046 3047 Value *Ptr = CI->getArgOperand(0); 3048 VectorType *VTy = cast<VectorType>(CI->getType()); 3049 3050 // Convert the type of the pointer to a pointer to the stored type. 3051 Value *BC = 3052 Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast"); 3053 LoadInst *LI = Builder.CreateAlignedLoad(VTy, BC, VTy->getBitWidth() / 8); 3054 LI->setMetadata(M->getMDKindID("nontemporal"), Node); 3055 Rep = LI; 3056 } else if (IsX86 && (Name.startswith("fma.vfmadd.") || 3057 Name.startswith("fma.vfmsub.") || 3058 Name.startswith("fma.vfnmadd.") || 3059 Name.startswith("fma.vfnmsub."))) { 3060 bool NegMul = Name[6] == 'n'; 3061 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's'; 3062 bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's'; 3063 3064 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3065 CI->getArgOperand(2) }; 3066 3067 if (IsScalar) { 3068 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 3069 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 3070 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 3071 } 3072 3073 if (NegMul && !IsScalar) 3074 Ops[0] = Builder.CreateFNeg(Ops[0]); 3075 if (NegMul && IsScalar) 3076 Ops[1] = Builder.CreateFNeg(Ops[1]); 3077 if (NegAcc) 3078 Ops[2] = Builder.CreateFNeg(Ops[2]); 3079 3080 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 3081 Intrinsic::fma, 3082 Ops[0]->getType()), 3083 Ops); 3084 3085 if (IsScalar) 3086 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, 3087 (uint64_t)0); 3088 } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) { 3089 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3090 CI->getArgOperand(2) }; 3091 3092 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 3093 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 3094 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 3095 3096 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 3097 Intrinsic::fma, 3098 Ops[0]->getType()), 3099 Ops); 3100 3101 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()), 3102 Rep, (uint64_t)0); 3103 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") || 3104 Name.startswith("avx512.maskz.vfmadd.s") || 3105 Name.startswith("avx512.mask3.vfmadd.s") || 3106 Name.startswith("avx512.mask3.vfmsub.s") || 3107 Name.startswith("avx512.mask3.vfnmsub.s"))) { 3108 bool IsMask3 = Name[11] == '3'; 3109 bool IsMaskZ = Name[11] == 'z'; 3110 // Drop the "avx512.mask." to make it easier. 3111 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 3112 bool NegMul = Name[2] == 'n'; 3113 bool NegAcc = NegMul ? 
Name[4] == 's' : Name[3] == 's'; 3114 3115 Value *A = CI->getArgOperand(0); 3116 Value *B = CI->getArgOperand(1); 3117 Value *C = CI->getArgOperand(2); 3118 3119 if (NegMul && (IsMask3 || IsMaskZ)) 3120 A = Builder.CreateFNeg(A); 3121 if (NegMul && !(IsMask3 || IsMaskZ)) 3122 B = Builder.CreateFNeg(B); 3123 if (NegAcc) 3124 C = Builder.CreateFNeg(C); 3125 3126 A = Builder.CreateExtractElement(A, (uint64_t)0); 3127 B = Builder.CreateExtractElement(B, (uint64_t)0); 3128 C = Builder.CreateExtractElement(C, (uint64_t)0); 3129 3130 if (!isa<ConstantInt>(CI->getArgOperand(4)) || 3131 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) { 3132 Value *Ops[] = { A, B, C, CI->getArgOperand(4) }; 3133 3134 Intrinsic::ID IID; 3135 if (Name.back() == 'd') 3136 IID = Intrinsic::x86_avx512_vfmadd_f64; 3137 else 3138 IID = Intrinsic::x86_avx512_vfmadd_f32; 3139 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID); 3140 Rep = Builder.CreateCall(FMA, Ops); 3141 } else { 3142 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 3143 Intrinsic::fma, 3144 A->getType()); 3145 Rep = Builder.CreateCall(FMA, { A, B, C }); 3146 } 3147 3148 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) : 3149 IsMask3 ? C : A; 3150 3151 // For Mask3 with NegAcc, we need to create a new extractelement that 3152 // avoids the negation above. 3153 if (NegAcc && IsMask3) 3154 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2), 3155 (uint64_t)0); 3156 3157 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3), 3158 Rep, PassThru); 3159 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0), 3160 Rep, (uint64_t)0); 3161 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") || 3162 Name.startswith("avx512.mask.vfnmadd.p") || 3163 Name.startswith("avx512.mask.vfnmsub.p") || 3164 Name.startswith("avx512.mask3.vfmadd.p") || 3165 Name.startswith("avx512.mask3.vfmsub.p") || 3166 Name.startswith("avx512.mask3.vfnmsub.p") || 3167 Name.startswith("avx512.maskz.vfmadd.p"))) { 3168 bool IsMask3 = Name[11] == '3'; 3169 bool IsMaskZ = Name[11] == 'z'; 3170 // Drop the "avx512.mask." to make it easier. 3171 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 3172 bool NegMul = Name[2] == 'n'; 3173 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's'; 3174 3175 Value *A = CI->getArgOperand(0); 3176 Value *B = CI->getArgOperand(1); 3177 Value *C = CI->getArgOperand(2); 3178 3179 if (NegMul && (IsMask3 || IsMaskZ)) 3180 A = Builder.CreateFNeg(A); 3181 if (NegMul && !(IsMask3 || IsMaskZ)) 3182 B = Builder.CreateFNeg(B); 3183 if (NegAcc) 3184 C = Builder.CreateFNeg(C); 3185 3186 if (CI->getNumArgOperands() == 5 && 3187 (!isa<ConstantInt>(CI->getArgOperand(4)) || 3188 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) { 3189 Intrinsic::ID IID; 3190 // Check the character before ".512" in string. 3191 if (Name[Name.size()-5] == 's') 3192 IID = Intrinsic::x86_avx512_vfmadd_ps_512; 3193 else 3194 IID = Intrinsic::x86_avx512_vfmadd_pd_512; 3195 3196 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 3197 { A, B, C, CI->getArgOperand(4) }); 3198 } else { 3199 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 3200 Intrinsic::fma, 3201 A->getType()); 3202 Rep = Builder.CreateCall(FMA, { A, B, C }); 3203 } 3204 3205 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) : 3206 IsMask3 ? 
CI->getArgOperand(2) : 3207 CI->getArgOperand(0); 3208 3209 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3210 } else if (IsX86 && (Name.startswith("fma.vfmaddsub.p") || 3211 Name.startswith("fma.vfmsubadd.p"))) { 3212 bool IsSubAdd = Name[7] == 's'; 3213 int NumElts = CI->getType()->getVectorNumElements(); 3214 3215 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3216 CI->getArgOperand(2) }; 3217 3218 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma, 3219 Ops[0]->getType()); 3220 Value *Odd = Builder.CreateCall(FMA, Ops); 3221 Ops[2] = Builder.CreateFNeg(Ops[2]); 3222 Value *Even = Builder.CreateCall(FMA, Ops); 3223 3224 if (IsSubAdd) 3225 std::swap(Even, Odd); 3226 3227 SmallVector<uint32_t, 32> Idxs(NumElts); 3228 for (int i = 0; i != NumElts; ++i) 3229 Idxs[i] = i + (i % 2) * NumElts; 3230 3231 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs); 3232 } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") || 3233 Name.startswith("avx512.mask3.vfmaddsub.p") || 3234 Name.startswith("avx512.maskz.vfmaddsub.p") || 3235 Name.startswith("avx512.mask3.vfmsubadd.p"))) { 3236 bool IsMask3 = Name[11] == '3'; 3237 bool IsMaskZ = Name[11] == 'z'; 3238 // Drop the "avx512.mask." to make it easier. 3239 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 3240 bool IsSubAdd = Name[3] == 's'; 3241 if (CI->getNumArgOperands() == 5 && 3242 (!isa<ConstantInt>(CI->getArgOperand(4)) || 3243 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) { 3244 Intrinsic::ID IID; 3245 // Check the character before ".512" in string. 3246 if (Name[Name.size()-5] == 's') 3247 IID = Intrinsic::x86_avx512_vfmaddsub_ps_512; 3248 else 3249 IID = Intrinsic::x86_avx512_vfmaddsub_pd_512; 3250 3251 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3252 CI->getArgOperand(2), CI->getArgOperand(4) }; 3253 if (IsSubAdd) 3254 Ops[2] = Builder.CreateFNeg(Ops[2]); 3255 3256 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 3257 Ops); 3258 3259 } else { 3260 int NumElts = CI->getType()->getVectorNumElements(); 3261 3262 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3263 CI->getArgOperand(2) }; 3264 3265 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma, 3266 Ops[0]->getType()); 3267 Value *Odd = Builder.CreateCall(FMA, Ops); 3268 Ops[2] = Builder.CreateFNeg(Ops[2]); 3269 Value *Even = Builder.CreateCall(FMA, Ops); 3270 3271 if (IsSubAdd) 3272 std::swap(Even, Odd); 3273 3274 SmallVector<uint32_t, 32> Idxs(NumElts); 3275 for (int i = 0; i != NumElts; ++i) 3276 Idxs[i] = i + (i % 2) * NumElts; 3277 3278 Rep = Builder.CreateShuffleVector(Even, Odd, Idxs); 3279 } 3280 3281 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) : 3282 IsMask3 ?
CI->getArgOperand(2) : 3283 CI->getArgOperand(0); 3284 3285 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3286 } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") || 3287 Name.startswith("avx512.maskz.pternlog."))) { 3288 bool ZeroMask = Name[11] == 'z'; 3289 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3290 unsigned EltWidth = CI->getType()->getScalarSizeInBits(); 3291 Intrinsic::ID IID; 3292 if (VecWidth == 128 && EltWidth == 32) 3293 IID = Intrinsic::x86_avx512_pternlog_d_128; 3294 else if (VecWidth == 256 && EltWidth == 32) 3295 IID = Intrinsic::x86_avx512_pternlog_d_256; 3296 else if (VecWidth == 512 && EltWidth == 32) 3297 IID = Intrinsic::x86_avx512_pternlog_d_512; 3298 else if (VecWidth == 128 && EltWidth == 64) 3299 IID = Intrinsic::x86_avx512_pternlog_q_128; 3300 else if (VecWidth == 256 && EltWidth == 64) 3301 IID = Intrinsic::x86_avx512_pternlog_q_256; 3302 else if (VecWidth == 512 && EltWidth == 64) 3303 IID = Intrinsic::x86_avx512_pternlog_q_512; 3304 else 3305 llvm_unreachable("Unexpected intrinsic"); 3306 3307 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1), 3308 CI->getArgOperand(2), CI->getArgOperand(3) }; 3309 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3310 Args); 3311 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3312 : CI->getArgOperand(0); 3313 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru); 3314 } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") || 3315 Name.startswith("avx512.maskz.vpmadd52"))) { 3316 bool ZeroMask = Name[11] == 'z'; 3317 bool High = Name[20] == 'h' || Name[21] == 'h'; 3318 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3319 Intrinsic::ID IID; 3320 if (VecWidth == 128 && !High) 3321 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128; 3322 else if (VecWidth == 256 && !High) 3323 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256; 3324 else if (VecWidth == 512 && !High) 3325 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512; 3326 else if (VecWidth == 128 && High) 3327 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128; 3328 else if (VecWidth == 256 && High) 3329 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256; 3330 else if (VecWidth == 512 && High) 3331 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512; 3332 else 3333 llvm_unreachable("Unexpected intrinsic"); 3334 3335 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1), 3336 CI->getArgOperand(2) }; 3337 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3338 Args); 3339 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3340 : CI->getArgOperand(0); 3341 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3342 } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") || 3343 Name.startswith("avx512.mask.vpermt2var.") || 3344 Name.startswith("avx512.maskz.vpermt2var."))) { 3345 bool ZeroMask = Name[11] == 'z'; 3346 bool IndexForm = Name[17] == 'i'; 3347 Rep = UpgradeX86VPERMT2Intrinsics(Builder, *CI, ZeroMask, IndexForm); 3348 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") || 3349 Name.startswith("avx512.maskz.vpdpbusd.") || 3350 Name.startswith("avx512.mask.vpdpbusds.") || 3351 Name.startswith("avx512.maskz.vpdpbusds."))) { 3352 bool ZeroMask = Name[11] == 'z'; 3353 bool IsSaturating = Name[ZeroMask ? 
21 : 20] == 's'; 3354 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3355 Intrinsic::ID IID; 3356 if (VecWidth == 128 && !IsSaturating) 3357 IID = Intrinsic::x86_avx512_vpdpbusd_128; 3358 else if (VecWidth == 256 && !IsSaturating) 3359 IID = Intrinsic::x86_avx512_vpdpbusd_256; 3360 else if (VecWidth == 512 && !IsSaturating) 3361 IID = Intrinsic::x86_avx512_vpdpbusd_512; 3362 else if (VecWidth == 128 && IsSaturating) 3363 IID = Intrinsic::x86_avx512_vpdpbusds_128; 3364 else if (VecWidth == 256 && IsSaturating) 3365 IID = Intrinsic::x86_avx512_vpdpbusds_256; 3366 else if (VecWidth == 512 && IsSaturating) 3367 IID = Intrinsic::x86_avx512_vpdpbusds_512; 3368 else 3369 llvm_unreachable("Unexpected intrinsic"); 3370 3371 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3372 CI->getArgOperand(2) }; 3373 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3374 Args); 3375 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3376 : CI->getArgOperand(0); 3377 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3378 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") || 3379 Name.startswith("avx512.maskz.vpdpwssd.") || 3380 Name.startswith("avx512.mask.vpdpwssds.") || 3381 Name.startswith("avx512.maskz.vpdpwssds."))) { 3382 bool ZeroMask = Name[11] == 'z'; 3383 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's'; 3384 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3385 Intrinsic::ID IID; 3386 if (VecWidth == 128 && !IsSaturating) 3387 IID = Intrinsic::x86_avx512_vpdpwssd_128; 3388 else if (VecWidth == 256 && !IsSaturating) 3389 IID = Intrinsic::x86_avx512_vpdpwssd_256; 3390 else if (VecWidth == 512 && !IsSaturating) 3391 IID = Intrinsic::x86_avx512_vpdpwssd_512; 3392 else if (VecWidth == 128 && IsSaturating) 3393 IID = Intrinsic::x86_avx512_vpdpwssds_128; 3394 else if (VecWidth == 256 && IsSaturating) 3395 IID = Intrinsic::x86_avx512_vpdpwssds_256; 3396 else if (VecWidth == 512 && IsSaturating) 3397 IID = Intrinsic::x86_avx512_vpdpwssds_512; 3398 else 3399 llvm_unreachable("Unexpected intrinsic"); 3400 3401 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3402 CI->getArgOperand(2) }; 3403 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3404 Args); 3405 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3406 : CI->getArgOperand(0); 3407 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3408 } else if (IsX86 && (Name == "addcarryx.u32" || Name == "addcarryx.u64" || 3409 Name == "addcarry.u32" || Name == "addcarry.u64" || 3410 Name == "subborrow.u32" || Name == "subborrow.u64")) { 3411 Intrinsic::ID IID; 3412 if (Name[0] == 'a' && Name.back() == '2') 3413 IID = Intrinsic::x86_addcarry_32; 3414 else if (Name[0] == 'a' && Name.back() == '4') 3415 IID = Intrinsic::x86_addcarry_64; 3416 else if (Name[0] == 's' && Name.back() == '2') 3417 IID = Intrinsic::x86_subborrow_32; 3418 else if (Name[0] == 's' && Name.back() == '4') 3419 IID = Intrinsic::x86_subborrow_64; 3420 else 3421 llvm_unreachable("Unexpected intrinsic"); 3422 3423 // Make a call with 3 operands. 3424 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3425 CI->getArgOperand(2)}; 3426 Value *NewCall = Builder.CreateCall( 3427 Intrinsic::getDeclaration(CI->getModule(), IID), 3428 Args); 3429 3430 // Extract the second result and store it. 3431 Value *Data = Builder.CreateExtractValue(NewCall, 1); 3432 // Cast the pointer to the right type. 
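// (Illustrative sketch with assumed IR shapes, not verbatim output:
//    old:  %cf   = call i8 @llvm.x86.addcarry.u32(i8 %c, i32 %a, i32 %b, i8* %p)
//    new:  %pair = call { i8, i32 } @llvm.x86.addcarry.32(i8 %c, i32 %a, i32 %b)
//  The sum is element 1 of the struct and is stored back through the old
//  pointer operand; the carry flag is element 0 and replaces the original
//  result.)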
3433 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(3), 3434 llvm::PointerType::getUnqual(Data->getType())); 3435 Builder.CreateAlignedStore(Data, Ptr, 1); 3436 // Replace the original call result with the first result of the new call. 3437 Value *CF = Builder.CreateExtractValue(NewCall, 0); 3438 3439 CI->replaceAllUsesWith(CF); 3440 Rep = nullptr; 3441 } else if (IsX86 && Name.startswith("avx512.mask.") && 3442 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) { 3443 // Rep will be updated by the call in the condition. 3444 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) { 3445 Value *Arg = CI->getArgOperand(0); 3446 Value *Neg = Builder.CreateNeg(Arg, "neg"); 3447 Value *Cmp = Builder.CreateICmpSGE( 3448 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond"); 3449 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs"); 3450 } else if (IsNVVM && (Name.startswith("atomic.load.add.f32.p") || 3451 Name.startswith("atomic.load.add.f64.p"))) { 3452 Value *Ptr = CI->getArgOperand(0); 3453 Value *Val = CI->getArgOperand(1); 3454 Rep = Builder.CreateAtomicRMW(AtomicRMWInst::FAdd, Ptr, Val, 3455 AtomicOrdering::SequentiallyConsistent); 3456 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" || 3457 Name == "max.ui" || Name == "max.ull")) { 3458 Value *Arg0 = CI->getArgOperand(0); 3459 Value *Arg1 = CI->getArgOperand(1); 3460 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 3461 ? Builder.CreateICmpUGE(Arg0, Arg1, "max.cond") 3462 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond"); 3463 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max"); 3464 } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" || 3465 Name == "min.ui" || Name == "min.ull")) { 3466 Value *Arg0 = CI->getArgOperand(0); 3467 Value *Arg1 = CI->getArgOperand(1); 3468 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 3469 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond") 3470 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond"); 3471 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min"); 3472 } else if (IsNVVM && Name == "clz.ll") { 3473 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 returns an i64. 3474 Value *Arg = CI->getArgOperand(0); 3475 Value *Ctlz = Builder.CreateCall( 3476 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, 3477 {Arg->getType()}), 3478 {Arg, Builder.getFalse()}, "ctlz"); 3479 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc"); 3480 } else if (IsNVVM && Name == "popc.ll") { 3481 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 returns an 3482 // i64.
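// Roughly (assumed IR shapes): %r = call i32 @llvm.nvvm.popc.ll(i64 %x) becomes
//   %c = call i64 @llvm.ctpop.i64(i64 %x)
//   %r = trunc i64 %c to i32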
3483 Value *Arg = CI->getArgOperand(0); 3484 Value *Popc = Builder.CreateCall( 3485 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop, 3486 {Arg->getType()}), 3487 Arg, "ctpop"); 3488 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc"); 3489 } else if (IsNVVM && Name == "h2f") { 3490 Rep = Builder.CreateCall(Intrinsic::getDeclaration( 3491 F->getParent(), Intrinsic::convert_from_fp16, 3492 {Builder.getFloatTy()}), 3493 CI->getArgOperand(0), "h2f"); 3494 } else { 3495 llvm_unreachable("Unknown function for CallInst upgrade."); 3496 } 3497 3498 if (Rep) 3499 CI->replaceAllUsesWith(Rep); 3500 CI->eraseFromParent(); 3501 return; 3502 } 3503 3504 const auto &DefaultCase = [&NewFn, &CI]() -> void { 3505 // Handle generic mangling change, but nothing else 3506 assert( 3507 (CI->getCalledFunction()->getName() != NewFn->getName()) && 3508 "Unknown function for CallInst upgrade and isn't just a name change"); 3509 CI->setCalledFunction(NewFn); 3510 }; 3511 CallInst *NewCall = nullptr; 3512 switch (NewFn->getIntrinsicID()) { 3513 default: { 3514 DefaultCase(); 3515 return; 3516 } 3517 case Intrinsic::experimental_vector_reduce_v2_fmul: { 3518 SmallVector<Value *, 2> Args; 3519 if (CI->isFast()) 3520 Args.push_back(ConstantFP::get(CI->getOperand(0)->getType(), 1.0)); 3521 else 3522 Args.push_back(CI->getOperand(0)); 3523 Args.push_back(CI->getOperand(1)); 3524 NewCall = Builder.CreateCall(NewFn, Args); 3525 cast<Instruction>(NewCall)->copyFastMathFlags(CI); 3526 break; 3527 } 3528 case Intrinsic::experimental_vector_reduce_v2_fadd: { 3529 SmallVector<Value *, 2> Args; 3530 if (CI->isFast()) 3531 Args.push_back(Constant::getNullValue(CI->getOperand(0)->getType())); 3532 else 3533 Args.push_back(CI->getOperand(0)); 3534 Args.push_back(CI->getOperand(1)); 3535 NewCall = Builder.CreateCall(NewFn, Args); 3536 cast<Instruction>(NewCall)->copyFastMathFlags(CI); 3537 break; 3538 } 3539 case Intrinsic::arm_neon_vld1: 3540 case Intrinsic::arm_neon_vld2: 3541 case Intrinsic::arm_neon_vld3: 3542 case Intrinsic::arm_neon_vld4: 3543 case Intrinsic::arm_neon_vld2lane: 3544 case Intrinsic::arm_neon_vld3lane: 3545 case Intrinsic::arm_neon_vld4lane: 3546 case Intrinsic::arm_neon_vst1: 3547 case Intrinsic::arm_neon_vst2: 3548 case Intrinsic::arm_neon_vst3: 3549 case Intrinsic::arm_neon_vst4: 3550 case Intrinsic::arm_neon_vst2lane: 3551 case Intrinsic::arm_neon_vst3lane: 3552 case Intrinsic::arm_neon_vst4lane: { 3553 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3554 CI->arg_operands().end()); 3555 NewCall = Builder.CreateCall(NewFn, Args); 3556 break; 3557 } 3558 3559 case Intrinsic::bitreverse: 3560 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3561 break; 3562 3563 case Intrinsic::ctlz: 3564 case Intrinsic::cttz: 3565 assert(CI->getNumArgOperands() == 1 && 3566 "Mismatch between function args and call args"); 3567 NewCall = 3568 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()}); 3569 break; 3570 3571 case Intrinsic::objectsize: { 3572 Value *NullIsUnknownSize = CI->getNumArgOperands() == 2 3573 ? Builder.getFalse() 3574 : CI->getArgOperand(2); 3575 Value *Dynamic = 3576 CI->getNumArgOperands() < 4 ? 
Builder.getFalse() : CI->getArgOperand(3); 3577 NewCall = Builder.CreateCall( 3578 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize, Dynamic}); 3579 break; 3580 } 3581 3582 case Intrinsic::ctpop: 3583 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3584 break; 3585 3586 case Intrinsic::convert_from_fp16: 3587 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3588 break; 3589 3590 case Intrinsic::dbg_value: 3591 // Upgrade from the old version that had an extra offset argument. 3592 assert(CI->getNumArgOperands() == 4); 3593 // Drop nonzero offsets instead of attempting to upgrade them. 3594 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1))) 3595 if (Offset->isZeroValue()) { 3596 NewCall = Builder.CreateCall( 3597 NewFn, 3598 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)}); 3599 break; 3600 } 3601 CI->eraseFromParent(); 3602 return; 3603 3604 case Intrinsic::x86_xop_vfrcz_ss: 3605 case Intrinsic::x86_xop_vfrcz_sd: 3606 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)}); 3607 break; 3608 3609 case Intrinsic::x86_xop_vpermil2pd: 3610 case Intrinsic::x86_xop_vpermil2ps: 3611 case Intrinsic::x86_xop_vpermil2pd_256: 3612 case Intrinsic::x86_xop_vpermil2ps_256: { 3613 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3614 CI->arg_operands().end()); 3615 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType()); 3616 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy); 3617 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy); 3618 NewCall = Builder.CreateCall(NewFn, Args); 3619 break; 3620 } 3621 3622 case Intrinsic::x86_sse41_ptestc: 3623 case Intrinsic::x86_sse41_ptestz: 3624 case Intrinsic::x86_sse41_ptestnzc: { 3625 // The arguments for these intrinsics used to be v4f32, and changed 3626 // to v2i64. This is purely a nop, since those are bitwise intrinsics. 3627 // So, the only thing required is a bitcast for both arguments. 3628 // First, check the arguments have the old type. 3629 Value *Arg0 = CI->getArgOperand(0); 3630 if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4)) 3631 return; 3632 3633 // Old intrinsic, add bitcasts 3634 Value *Arg1 = CI->getArgOperand(1); 3635 3636 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 3637 3638 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast"); 3639 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 3640 3641 NewCall = Builder.CreateCall(NewFn, {BC0, BC1}); 3642 break; 3643 } 3644 3645 case Intrinsic::x86_rdtscp: { 3646 // This used to take 1 argument. If we have no arguments, it is already 3647 // upgraded. 3648 if (CI->getNumOperands() == 0) 3649 return; 3650 3651 NewCall = Builder.CreateCall(NewFn); 3652 // Extract the second result and store it. 3653 Value *Data = Builder.CreateExtractValue(NewCall, 1); 3654 // Cast the pointer to the right type. 3655 Value *Ptr = Builder.CreateBitCast(CI->getArgOperand(0), 3656 llvm::PointerType::getUnqual(Data->getType())); 3657 Builder.CreateAlignedStore(Data, Ptr, 1); 3658 // Replace the original call result with the first result of the new call.
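// (Rough before/after sketch, IR shapes assumed:
//    old:  %tsc  = call i64 @llvm.x86.rdtscp(i8* %aux)
//    new:  %pair = call { i64, i32 } @llvm.x86.rdtscp()
//  The TSC_AUX value is element 1 and was stored through the old pointer
//  operand above; element 0 is extracted below to stand in for the original
//  result.)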
3659 Value *TSC = Builder.CreateExtractValue(NewCall, 0); 3660 3661 std::string Name = CI->getName(); 3662 if (!Name.empty()) { 3663 CI->setName(Name + ".old"); 3664 NewCall->setName(Name); 3665 } 3666 CI->replaceAllUsesWith(TSC); 3667 CI->eraseFromParent(); 3668 return; 3669 } 3670 3671 case Intrinsic::x86_sse41_insertps: 3672 case Intrinsic::x86_sse41_dppd: 3673 case Intrinsic::x86_sse41_dpps: 3674 case Intrinsic::x86_sse41_mpsadbw: 3675 case Intrinsic::x86_avx_dp_ps_256: 3676 case Intrinsic::x86_avx2_mpsadbw: { 3677 // Need to truncate the last argument from i32 to i8 -- this argument models 3678 // an inherently 8-bit immediate operand to these x86 instructions. 3679 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3680 CI->arg_operands().end()); 3681 3682 // Replace the last argument with a trunc. 3683 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc"); 3684 NewCall = Builder.CreateCall(NewFn, Args); 3685 break; 3686 } 3687 3688 case Intrinsic::thread_pointer: { 3689 NewCall = Builder.CreateCall(NewFn, {}); 3690 break; 3691 } 3692 3693 case Intrinsic::invariant_start: 3694 case Intrinsic::invariant_end: 3695 case Intrinsic::masked_load: 3696 case Intrinsic::masked_store: 3697 case Intrinsic::masked_gather: 3698 case Intrinsic::masked_scatter: { 3699 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3700 CI->arg_operands().end()); 3701 NewCall = Builder.CreateCall(NewFn, Args); 3702 break; 3703 } 3704 3705 case Intrinsic::memcpy: 3706 case Intrinsic::memmove: 3707 case Intrinsic::memset: { 3708 // We have to make sure that the call signature is what we're expecting. 3709 // We only want to change the old signatures by removing the alignment arg: 3710 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i32, i1) 3711 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i1) 3712 // @llvm.memset...(i8*, i8, i[32|64], i32, i1) 3713 // -> @llvm.memset...(i8*, i8, i[32|64], i1) 3714 // Note: i8*'s in the above can be any pointer type 3715 if (CI->getNumArgOperands() != 5) { 3716 DefaultCase(); 3717 return; 3718 } 3719 // Remove alignment argument (3), and add alignment attributes to the 3720 // dest/src pointers. 3721 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1), 3722 CI->getArgOperand(2), CI->getArgOperand(4)}; 3723 NewCall = Builder.CreateCall(NewFn, Args); 3724 auto *MemCI = cast<MemIntrinsic>(NewCall); 3725 // All mem intrinsics support dest alignment. 3726 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3)); 3727 MemCI->setDestAlignment(Align->getZExtValue()); 3728 // Memcpy/Memmove also support source alignment. 3729 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI)) 3730 MTI->setSourceAlignment(Align->getZExtValue()); 3731 break; 3732 } 3733 } 3734 assert(NewCall && "Should have either set this variable or returned through " 3735 "the default case"); 3736 std::string Name = CI->getName(); 3737 if (!Name.empty()) { 3738 CI->setName(Name + ".old"); 3739 NewCall->setName(Name); 3740 } 3741 CI->replaceAllUsesWith(NewCall); 3742 CI->eraseFromParent(); 3743 } 3744 3745 void llvm::UpgradeCallsToIntrinsic(Function *F) { 3746 assert(F && "Illegal attempt to upgrade a non-existent intrinsic."); 3747 3748 // Check if this function should be upgraded and get the replacement function 3749 // if there is one. 3750 Function *NewFn; 3751 if (UpgradeIntrinsicFunction(F, NewFn)) { 3752 // Replace all users of the old function with the new function or new 3753 // instructions. This is not a range loop because the call is deleted. 
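// The post-increment inside the loop body advances the use iterator before
// UpgradeIntrinsicCall can erase the call (and with it the use being visited).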
3754 for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; ) 3755 if (CallInst *CI = dyn_cast<CallInst>(*UI++)) 3756 UpgradeIntrinsicCall(CI, NewFn); 3757 3758 // Remove old function, no longer used, from the module. 3759 F->eraseFromParent(); 3760 } 3761 } 3762 3763 MDNode *llvm::UpgradeTBAANode(MDNode &MD) { 3764 // Check if the tag uses struct-path aware TBAA format. 3765 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3) 3766 return &MD; 3767 3768 auto &Context = MD.getContext(); 3769 if (MD.getNumOperands() == 3) { 3770 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)}; 3771 MDNode *ScalarType = MDNode::get(Context, Elts); 3772 // Create a MDNode <ScalarType, ScalarType, offset 0, const> 3773 Metadata *Elts2[] = {ScalarType, ScalarType, 3774 ConstantAsMetadata::get( 3775 Constant::getNullValue(Type::getInt64Ty(Context))), 3776 MD.getOperand(2)}; 3777 return MDNode::get(Context, Elts2); 3778 } 3779 // Create a MDNode <MD, MD, offset 0> 3780 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue( 3781 Type::getInt64Ty(Context)))}; 3782 return MDNode::get(Context, Elts); 3783 } 3784 3785 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy, 3786 Instruction *&Temp) { 3787 if (Opc != Instruction::BitCast) 3788 return nullptr; 3789 3790 Temp = nullptr; 3791 Type *SrcTy = V->getType(); 3792 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3793 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3794 LLVMContext &Context = V->getContext(); 3795 3796 // We have no information about target data layout, so we assume that 3797 // the maximum pointer size is 64bit. 3798 Type *MidTy = Type::getInt64Ty(Context); 3799 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy); 3800 3801 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy); 3802 } 3803 3804 return nullptr; 3805 } 3806 3807 Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) { 3808 if (Opc != Instruction::BitCast) 3809 return nullptr; 3810 3811 Type *SrcTy = C->getType(); 3812 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3813 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3814 LLVMContext &Context = C->getContext(); 3815 3816 // We have no information about target data layout, so we assume that 3817 // the maximum pointer size is 64bit. 3818 Type *MidTy = Type::getInt64Ty(Context); 3819 3820 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy), 3821 DestTy); 3822 } 3823 3824 return nullptr; 3825 } 3826 3827 /// Check the debug info version number, if it is out-dated, drop the debug 3828 /// info. Return true if module is modified. 3829 bool llvm::UpgradeDebugInfo(Module &M) { 3830 unsigned Version = getDebugMetadataVersionFromModule(M); 3831 if (Version == DEBUG_METADATA_VERSION) { 3832 bool BrokenDebugInfo = false; 3833 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo)) 3834 report_fatal_error("Broken module found, compilation aborted!"); 3835 if (!BrokenDebugInfo) 3836 // Everything is ok. 3837 return false; 3838 else { 3839 // Diagnose malformed debug info. 3840 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M); 3841 M.getContext().diagnose(Diag); 3842 } 3843 } 3844 bool Modified = StripDebugInfo(M); 3845 if (Modified && Version != DEBUG_METADATA_VERSION) { 3846 // Diagnose a version mismatch. 
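// This branch is reached only when debug info was actually stripped and the
// recorded version differs from the current DEBUG_METADATA_VERSION; malformed
// debug info at the current version was already diagnosed above.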
3847 DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version); 3848 M.getContext().diagnose(DiagVersion); 3849 } 3850 return Modified; 3851 } 3852 3853 /// This checks for objc retain release marker which should be upgraded. It 3854 /// returns true if module is modified. 3855 static bool UpgradeRetainReleaseMarker(Module &M) { 3856 bool Changed = false; 3857 const char *MarkerKey = "clang.arc.retainAutoreleasedReturnValueMarker"; 3858 NamedMDNode *ModRetainReleaseMarker = M.getNamedMetadata(MarkerKey); 3859 if (ModRetainReleaseMarker) { 3860 MDNode *Op = ModRetainReleaseMarker->getOperand(0); 3861 if (Op) { 3862 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0)); 3863 if (ID) { 3864 SmallVector<StringRef, 4> ValueComp; 3865 ID->getString().split(ValueComp, "#"); 3866 if (ValueComp.size() == 2) { 3867 std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str(); 3868 ID = MDString::get(M.getContext(), NewValue); 3869 } 3870 M.addModuleFlag(Module::Error, MarkerKey, ID); 3871 M.eraseNamedMetadata(ModRetainReleaseMarker); 3872 Changed = true; 3873 } 3874 } 3875 } 3876 return Changed; 3877 } 3878 3879 void llvm::UpgradeARCRuntime(Module &M) { 3880 // This lambda converts normal function calls to ARC runtime functions to 3881 // intrinsic calls. 3882 auto UpgradeToIntrinsic = [&](const char *OldFunc, 3883 llvm::Intrinsic::ID IntrinsicFunc) { 3884 Function *Fn = M.getFunction(OldFunc); 3885 3886 if (!Fn) 3887 return; 3888 3889 Function *NewFn = llvm::Intrinsic::getDeclaration(&M, IntrinsicFunc); 3890 3891 for (auto I = Fn->user_begin(), E = Fn->user_end(); I != E;) { 3892 CallInst *CI = dyn_cast<CallInst>(*I++); 3893 if (!CI || CI->getCalledFunction() != Fn) 3894 continue; 3895 3896 IRBuilder<> Builder(CI->getParent(), CI->getIterator()); 3897 FunctionType *NewFuncTy = NewFn->getFunctionType(); 3898 SmallVector<Value *, 2> Args; 3899 3900 // Don't upgrade the intrinsic if it's not valid to bitcast the return 3901 // value to the return type of the old function. 3902 if (NewFuncTy->getReturnType() != CI->getType() && 3903 !CastInst::castIsValid(Instruction::BitCast, CI, 3904 NewFuncTy->getReturnType())) 3905 continue; 3906 3907 bool InvalidCast = false; 3908 3909 for (unsigned I = 0, E = CI->getNumArgOperands(); I != E; ++I) { 3910 Value *Arg = CI->getArgOperand(I); 3911 3912 // Bitcast argument to the parameter type of the new function if it's 3913 // not a variadic argument. 3914 if (I < NewFuncTy->getNumParams()) { 3915 // Don't upgrade the intrinsic if it's not valid to bitcast the argument 3916 // to the parameter type of the new function. 3917 if (!CastInst::castIsValid(Instruction::BitCast, Arg, 3918 NewFuncTy->getParamType(I))) { 3919 InvalidCast = true; 3920 break; 3921 } 3922 Arg = Builder.CreateBitCast(Arg, NewFuncTy->getParamType(I)); 3923 } 3924 Args.push_back(Arg); 3925 } 3926 3927 if (InvalidCast) 3928 continue; 3929 3930 // Create a call instruction that calls the new function. 3931 CallInst *NewCall = Builder.CreateCall(NewFuncTy, NewFn, Args); 3932 NewCall->setTailCallKind(cast<CallInst>(CI)->getTailCallKind()); 3933 NewCall->setName(CI->getName()); 3934 3935 // Bitcast the return value back to the type of the old call. 
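// CreateBitCast is a no-op when the types already match, and castIsValid was
// checked earlier, so existing users of the old call keep the type they
// expect.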
3936 Value *NewRetVal = Builder.CreateBitCast(NewCall, CI->getType()); 3937 3938 if (!CI->use_empty()) 3939 CI->replaceAllUsesWith(NewRetVal); 3940 CI->eraseFromParent(); 3941 } 3942 3943 if (Fn->use_empty()) 3944 Fn->eraseFromParent(); 3945 }; 3946 3947 // Unconditionally convert a call to "clang.arc.use" to a call to 3948 // "llvm.objc.clang.arc.use". 3949 UpgradeToIntrinsic("clang.arc.use", llvm::Intrinsic::objc_clang_arc_use); 3950 3951 // Upgrade the retain release marker. If there is no need to upgrade 3952 // the marker, that means either the module is already new enough to contain 3953 // new intrinsics or it is not ARC. There is no need to upgrade runtime call. 3954 if (!UpgradeRetainReleaseMarker(M)) 3955 return; 3956 3957 std::pair<const char *, llvm::Intrinsic::ID> RuntimeFuncs[] = { 3958 {"objc_autorelease", llvm::Intrinsic::objc_autorelease}, 3959 {"objc_autoreleasePoolPop", llvm::Intrinsic::objc_autoreleasePoolPop}, 3960 {"objc_autoreleasePoolPush", llvm::Intrinsic::objc_autoreleasePoolPush}, 3961 {"objc_autoreleaseReturnValue", 3962 llvm::Intrinsic::objc_autoreleaseReturnValue}, 3963 {"objc_copyWeak", llvm::Intrinsic::objc_copyWeak}, 3964 {"objc_destroyWeak", llvm::Intrinsic::objc_destroyWeak}, 3965 {"objc_initWeak", llvm::Intrinsic::objc_initWeak}, 3966 {"objc_loadWeak", llvm::Intrinsic::objc_loadWeak}, 3967 {"objc_loadWeakRetained", llvm::Intrinsic::objc_loadWeakRetained}, 3968 {"objc_moveWeak", llvm::Intrinsic::objc_moveWeak}, 3969 {"objc_release", llvm::Intrinsic::objc_release}, 3970 {"objc_retain", llvm::Intrinsic::objc_retain}, 3971 {"objc_retainAutorelease", llvm::Intrinsic::objc_retainAutorelease}, 3972 {"objc_retainAutoreleaseReturnValue", 3973 llvm::Intrinsic::objc_retainAutoreleaseReturnValue}, 3974 {"objc_retainAutoreleasedReturnValue", 3975 llvm::Intrinsic::objc_retainAutoreleasedReturnValue}, 3976 {"objc_retainBlock", llvm::Intrinsic::objc_retainBlock}, 3977 {"objc_storeStrong", llvm::Intrinsic::objc_storeStrong}, 3978 {"objc_storeWeak", llvm::Intrinsic::objc_storeWeak}, 3979 {"objc_unsafeClaimAutoreleasedReturnValue", 3980 llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue}, 3981 {"objc_retainedObject", llvm::Intrinsic::objc_retainedObject}, 3982 {"objc_unretainedObject", llvm::Intrinsic::objc_unretainedObject}, 3983 {"objc_unretainedPointer", llvm::Intrinsic::objc_unretainedPointer}, 3984 {"objc_retain_autorelease", llvm::Intrinsic::objc_retain_autorelease}, 3985 {"objc_sync_enter", llvm::Intrinsic::objc_sync_enter}, 3986 {"objc_sync_exit", llvm::Intrinsic::objc_sync_exit}, 3987 {"objc_arc_annotation_topdown_bbstart", 3988 llvm::Intrinsic::objc_arc_annotation_topdown_bbstart}, 3989 {"objc_arc_annotation_topdown_bbend", 3990 llvm::Intrinsic::objc_arc_annotation_topdown_bbend}, 3991 {"objc_arc_annotation_bottomup_bbstart", 3992 llvm::Intrinsic::objc_arc_annotation_bottomup_bbstart}, 3993 {"objc_arc_annotation_bottomup_bbend", 3994 llvm::Intrinsic::objc_arc_annotation_bottomup_bbend}}; 3995 3996 for (auto &I : RuntimeFuncs) 3997 UpgradeToIntrinsic(I.first, I.second); 3998 } 3999 4000 bool llvm::UpgradeModuleFlags(Module &M) { 4001 NamedMDNode *ModFlags = M.getModuleFlagsMetadata(); 4002 if (!ModFlags) 4003 return false; 4004 4005 bool HasObjCFlag = false, HasClassProperties = false, Changed = false; 4006 for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) { 4007 MDNode *Op = ModFlags->getOperand(I); 4008 if (Op->getNumOperands() != 3) 4009 continue; 4010 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1)); 4011 if (!ID) 4012 
continue; 4013 if (ID->getString() == "Objective-C Image Info Version") 4014 HasObjCFlag = true; 4015 if (ID->getString() == "Objective-C Class Properties") 4016 HasClassProperties = true; 4017 // Upgrade PIC/PIE Module Flags. The module flag behavior for these two 4018 // flags used to be Error and is now Max. 4019 if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") { 4020 if (auto *Behavior = 4021 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) { 4022 if (Behavior->getLimitedValue() == Module::Error) { 4023 Type *Int32Ty = Type::getInt32Ty(M.getContext()); 4024 Metadata *Ops[3] = { 4025 ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)), 4026 MDString::get(M.getContext(), ID->getString()), 4027 Op->getOperand(2)}; 4028 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops)); 4029 Changed = true; 4030 } 4031 } 4032 } 4033 // Upgrade the Objective-C Image Info Section. Remove the whitespace in the 4034 // section name so that llvm-lto will not complain about mismatching 4035 // module flags that are functionally the same. 4036 if (ID->getString() == "Objective-C Image Info Section") { 4037 if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) { 4038 SmallVector<StringRef, 4> ValueComp; 4039 Value->getString().split(ValueComp, " "); 4040 if (ValueComp.size() != 1) { 4041 std::string NewValue; 4042 for (auto &S : ValueComp) 4043 NewValue += S.str(); 4044 Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1), 4045 MDString::get(M.getContext(), NewValue)}; 4046 ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops)); 4047 Changed = true; 4048 } 4049 } 4050 } 4051 } 4052 4053 // "Objective-C Class Properties" was recently added for Objective-C. We 4054 // upgrade ObjC bitcodes to contain an "Objective-C Class Properties" module 4055 // flag of value 0, so we can correctly downgrade this flag when trying to 4056 // link an ObjC bitcode without this module flag with an ObjC bitcode with 4057 // this module flag.
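// A sketch of the flag added below (metadata syntax assumed):
//   !{i32 4, !"Objective-C Class Properties", i32 0}
// i.e. Override behavior with a value of 0.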
4058 if (HasObjCFlag && !HasClassProperties) { 4059 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties", 4060 (uint32_t)0); 4061 Changed = true; 4062 } 4063 4064 return Changed; 4065 } 4066 4067 void llvm::UpgradeSectionAttributes(Module &M) { 4068 auto TrimSpaces = [](StringRef Section) -> std::string { 4069 SmallVector<StringRef, 5> Components; 4070 Section.split(Components, ','); 4071 4072 SmallString<32> Buffer; 4073 raw_svector_ostream OS(Buffer); 4074 4075 for (auto Component : Components) 4076 OS << ',' << Component.trim(); 4077 4078 return OS.str().substr(1); 4079 }; 4080 4081 for (auto &GV : M.globals()) { 4082 if (!GV.hasSection()) 4083 continue; 4084 4085 StringRef Section = GV.getSection(); 4086 4087 if (!Section.startswith("__DATA, __objc_catlist")) 4088 continue; 4089 4090 // __DATA, __objc_catlist, regular, no_dead_strip 4091 // __DATA,__objc_catlist,regular,no_dead_strip 4092 GV.setSection(TrimSpaces(Section)); 4093 } 4094 } 4095 4096 static bool isOldLoopArgument(Metadata *MD) { 4097 auto *T = dyn_cast_or_null<MDTuple>(MD); 4098 if (!T) 4099 return false; 4100 if (T->getNumOperands() < 1) 4101 return false; 4102 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0)); 4103 if (!S) 4104 return false; 4105 return S->getString().startswith("llvm.vectorizer."); 4106 } 4107 4108 static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) { 4109 StringRef OldPrefix = "llvm.vectorizer."; 4110 assert(OldTag.startswith(OldPrefix) && "Expected old prefix"); 4111 4112 if (OldTag == "llvm.vectorizer.unroll") 4113 return MDString::get(C, "llvm.loop.interleave.count"); 4114 4115 return MDString::get( 4116 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size())) 4117 .str()); 4118 } 4119 4120 static Metadata *upgradeLoopArgument(Metadata *MD) { 4121 auto *T = dyn_cast_or_null<MDTuple>(MD); 4122 if (!T) 4123 return MD; 4124 if (T->getNumOperands() < 1) 4125 return MD; 4126 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0)); 4127 if (!OldTag) 4128 return MD; 4129 if (!OldTag->getString().startswith("llvm.vectorizer.")) 4130 return MD; 4131 4132 // This has an old tag. Upgrade it. 4133 SmallVector<Metadata *, 8> Ops; 4134 Ops.reserve(T->getNumOperands()); 4135 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString())); 4136 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I) 4137 Ops.push_back(T->getOperand(I)); 4138 4139 return MDTuple::get(T->getContext(), Ops); 4140 } 4141 4142 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) { 4143 auto *T = dyn_cast<MDTuple>(&N); 4144 if (!T) 4145 return &N; 4146 4147 if (none_of(T->operands(), isOldLoopArgument)) 4148 return &N; 4149 4150 SmallVector<Metadata *, 8> Ops; 4151 Ops.reserve(T->getNumOperands()); 4152 for (Metadata *MD : T->operands()) 4153 Ops.push_back(upgradeLoopArgument(MD)); 4154 4155 return MDTuple::get(T->getContext(), Ops); 4156 } 4157 4158 std::string llvm::UpgradeDataLayoutString(StringRef DL, StringRef TT) { 4159 std::string AddrSpaces = "-p270:32:32-p271:32:32-p272:64:64"; 4160 4161 // If X86, and the datalayout matches the expected format, add pointer size 4162 // address spaces to the datalayout. 
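// For example (illustrative input only), an x86-64 layout string such as
//   "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
// would be rewritten below to
//   "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128".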
4163 Triple::ArchType Arch = Triple(TT).getArch(); 4164 if ((Arch != llvm::Triple::x86 && Arch != llvm::Triple::x86_64) || 4165 DL.contains(AddrSpaces)) 4166 return DL; 4167 4168 SmallVector<StringRef, 4> Groups; 4169 Regex R("(e-m:[a-z](-p:32:32)?)(-[if]64:.*$)"); 4170 if (!R.match(DL, &Groups)) 4171 return DL; 4172 4173 SmallString<1024> Buf; 4174 std::string Res = (Groups[1] + AddrSpaces + Groups[3]).toStringRef(Buf).str(); 4175 return Res; 4176 } 4177