//===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the auto-upgrade helper functions.
// This is where deprecated IR intrinsics and other IR features are updated to
// current specifications.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/AutoUpgrade.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Regex.h"
#include <cstring>
using namespace llvm;

static void rename(GlobalValue *GV) { GV->setName(GV->getName() + ".old"); }

// Upgrade the declarations of the SSE4.1 ptest intrinsics whose arguments have
// changed their type from v4f32 to v2i64.
static bool UpgradePTESTIntrinsic(Function* F, Intrinsic::ID IID,
                                  Function *&NewFn) {
  // Check whether this is an old version of the function, which received
  // v4f32 arguments.
  Type *Arg0Type = F->getFunctionType()->getParamType(0);
  if (Arg0Type != VectorType::get(Type::getFloatTy(F->getContext()), 4))
    return false;

  // Yes, it's old, replace it with new version.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

// Upgrade the declarations of intrinsic functions whose 8-bit immediate mask
// arguments have changed their type from i32 to i8.
static bool UpgradeX86IntrinsicsWith8BitMask(Function *F, Intrinsic::ID IID,
                                             Function *&NewFn) {
  // Check that the last argument is an i32.
  Type *LastArgType = F->getFunctionType()->getParamType(
      F->getFunctionType()->getNumParams() - 1);
  if (!LastArgType->isIntegerTy(32))
    return false;

  // Move this function aside and map down.
  rename(F);
  NewFn = Intrinsic::getDeclaration(F->getParent(), IID);
  return true;
}

static bool ShouldUpgradeX86Intrinsic(Function *F, StringRef Name) {
  // All of the intrinsic matches below should be marked with which llvm
  // version started autoupgrading them. At some point in the future we would
  // like to use this information to remove upgrade code for some older
  // intrinsics. It is currently undecided how we will determine that future
  // point.
  if (Name.startswith("sse2.paddus.") || // Added in 8.0
      Name.startswith("sse2.psubus.") || // Added in 8.0
      Name.startswith("avx2.paddus.") || // Added in 8.0
      Name.startswith("avx2.psubus.") || // Added in 8.0
      Name.startswith("avx512.mask.paddus.") || // Added in 8.0
      Name.startswith("avx512.mask.psubus.") || // Added in 8.0
      Name == "ssse3.pabs.b.128" || // Added in 6.0
      Name == "ssse3.pabs.w.128" || // Added in 6.0
      Name == "ssse3.pabs.d.128" || // Added in 6.0
      Name.startswith("fma4.vfmadd.s") || // Added in 7.0
      Name.startswith("fma.vfmadd.") || // Added in 7.0
      Name.startswith("fma.vfmsub.") || // Added in 7.0
      Name.startswith("fma.vfmaddsub.") || // Added in 7.0
      Name.startswith("fma.vfmsubadd.") || // Added in 7.0
      Name.startswith("fma.vfnmadd.") || // Added in 7.0
      Name.startswith("fma.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmadd.") || // Added in 7.0
      Name.startswith("avx512.mask.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmadd.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfnmsub.") || // Added in 7.0
      Name.startswith("avx512.mask.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.maskz.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmaddsub.") || // Added in 7.0
      Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
      Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
      Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
      Name.startswith("avx512.kunpck") || // Added in 6.0
      Name.startswith("avx2.pabs.") || // Added in 6.0
      Name.startswith("avx512.mask.pabs.") || // Added in 6.0
      Name.startswith("avx512.broadcastm") || // Added in 6.0
      Name == "sse.sqrt.ss" || // Added in 7.0
      Name == "sse2.sqrt.sd" || // Added in 7.0
      Name.startswith("avx512.mask.sqrt.p") || // Added in 7.0
      Name.startswith("avx.sqrt.p") || // Added in 7.0
      Name.startswith("sse2.sqrt.p") || // Added in 7.0
      Name.startswith("sse.sqrt.p") || // Added in 7.0
      Name.startswith("avx512.mask.pbroadcast") || // Added in 6.0
      Name.startswith("sse2.pcmpeq.") || // Added in 3.1
      Name.startswith("sse2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx2.pcmpeq.") || // Added in 3.1
      Name.startswith("avx2.pcmpgt.") || // Added in 3.1
      Name.startswith("avx512.mask.pcmpeq.") || // Added in 3.9
      Name.startswith("avx512.mask.pcmpgt.") || // Added in 3.9
      Name.startswith("avx.vperm2f128.") || // Added in 6.0
      Name == "avx2.vperm2i128" || // Added in 6.0
      Name == "sse.add.ss" || // Added in 4.0
      Name == "sse2.add.sd" || // Added in 4.0
      Name == "sse.sub.ss" || // Added in 4.0
      Name == "sse2.sub.sd" || // Added in 4.0
      Name == "sse.mul.ss" || // Added in 4.0
      Name == "sse2.mul.sd" || // Added in 4.0
      Name == "sse.div.ss" || // Added in 4.0
      Name == "sse2.div.sd" || // Added in 4.0
      Name == "sse41.pmaxsb" || // Added in 3.9
      Name == "sse2.pmaxs.w" || // Added in 3.9
      Name == "sse41.pmaxsd" || // Added in 3.9
      Name == "sse2.pmaxu.b" || // Added in 3.9
      Name == "sse41.pmaxuw" || // Added in 3.9
      Name == "sse41.pmaxud" || // Added in 3.9
      Name == "sse41.pminsb" || // Added in 3.9
      Name == "sse2.pmins.w" || // Added in 3.9
      Name == "sse41.pminsd" || // Added in 3.9
      Name == "sse2.pminu.b" || // Added in 3.9
      Name == "sse41.pminuw" || // Added in 3.9
      Name == "sse41.pminud" || // Added in 3.9
      Name == "avx512.kand.w" || // Added in 7.0
      Name == "avx512.kandn.w" || // Added in 7.0
      Name == "avx512.knot.w" || // Added in 7.0
      Name == "avx512.kor.w" || // Added in 7.0
      Name == "avx512.kxor.w" || // Added in 7.0
      Name == "avx512.kxnor.w" || // Added in 7.0
      Name == "avx512.kortestc.w" || // Added in 7.0
      Name == "avx512.kortestz.w" || // Added in 7.0
      Name.startswith("avx512.mask.pshuf.b.") || // Added in 4.0
      Name.startswith("avx2.pmax") || // Added in 3.9
      Name.startswith("avx2.pmin") || // Added in 3.9
      Name.startswith("avx512.mask.pmax") || // Added in 4.0
      Name.startswith("avx512.mask.pmin") || // Added in 4.0
      Name.startswith("avx2.vbroadcast") || // Added in 3.8
      Name.startswith("avx2.pbroadcast") || // Added in 3.8
      Name.startswith("avx.vpermil.") || // Added in 3.1
      Name.startswith("sse2.pshuf") || // Added in 3.9
      Name.startswith("avx512.pbroadcast") || // Added in 3.9
      Name.startswith("avx512.mask.broadcast.s") || // Added in 3.9
      Name.startswith("avx512.mask.movddup") || // Added in 3.9
      Name.startswith("avx512.mask.movshdup") || // Added in 3.9
      Name.startswith("avx512.mask.movsldup") || // Added in 3.9
      Name.startswith("avx512.mask.pshuf.d.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufl.w.") || // Added in 3.9
      Name.startswith("avx512.mask.pshufh.w.") || // Added in 3.9
      Name.startswith("avx512.mask.shuf.p") || // Added in 4.0
      Name.startswith("avx512.mask.vpermil.p") || // Added in 3.9
      Name.startswith("avx512.mask.perm.df.") || // Added in 3.9
      Name.startswith("avx512.mask.perm.di.") || // Added in 3.9
      Name.startswith("avx512.mask.punpckl") || // Added in 3.9
      Name.startswith("avx512.mask.punpckh") || // Added in 3.9
      Name.startswith("avx512.mask.unpckl.") || // Added in 3.9
      Name.startswith("avx512.mask.unpckh.") || // Added in 3.9
      Name.startswith("avx512.mask.pand.") || // Added in 3.9
      Name.startswith("avx512.mask.pandn.") || // Added in 3.9
      Name.startswith("avx512.mask.por.") || // Added in 3.9
      Name.startswith("avx512.mask.pxor.") || // Added in 3.9
      Name.startswith("avx512.mask.and.") || // Added in 3.9
      Name.startswith("avx512.mask.andn.") || // Added in 3.9
      Name.startswith("avx512.mask.or.") || // Added in 3.9
      Name.startswith("avx512.mask.xor.") || // Added in 3.9
      Name.startswith("avx512.mask.padd.") || // Added in 4.0
      Name.startswith("avx512.mask.psub.") || // Added in 4.0
      Name.startswith("avx512.mask.pmull.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
      Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
      Name == "avx512.mask.cvtudq2ps.128" || // Added in 7.0
      Name == "avx512.mask.cvtudq2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvtqq2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtqq2pd.256" || // Added in 7.0
      Name == "avx512.mask.cvtuqq2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtuqq2pd.256" || // Added in 7.0
      Name == "avx512.mask.cvtdq2ps.128" || // Added in 7.0
      Name == "avx512.mask.cvtdq2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvtpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtpd2ps.256" || // Added in 7.0
      Name == "avx512.mask.cvttpd2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.128" || // Added in 7.0
      Name == "avx512.mask.cvttps2dq.256" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.128" || // Added in 7.0
      Name == "avx512.mask.cvtps2pd.256" || // Added in 7.0
      Name == "avx512.cvtusi2sd" || // Added in 7.0
      Name.startswith("avx512.mask.permvar.") || // Added in 7.0
      Name == "sse2.pmulu.dq" || // Added in 7.0
      Name == "sse41.pmuldq" || // Added in 7.0
      Name == "avx2.pmulu.dq" || // Added in 7.0
      Name == "avx2.pmul.dq" || // Added in 7.0
      Name == "avx512.pmulu.dq.512" || // Added in 7.0
      Name == "avx512.pmul.dq.512" || // Added in 7.0
      Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
      Name.startswith("avx512.mask.pmul.hr.sw.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulh.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmulhu.w.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddw.d.") || // Added in 7.0
      Name.startswith("avx512.mask.pmaddubs.w.") || // Added in 7.0
      Name.startswith("avx512.mask.packsswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packssdw.") || // Added in 5.0
      Name.startswith("avx512.mask.packuswb.") || // Added in 5.0
      Name.startswith("avx512.mask.packusdw.") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.b") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.d") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.q") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.w") || // Added in 5.0
      Name.startswith("avx512.mask.cmp.p") || // Added in 7.0
      Name.startswith("avx512.mask.ucmp.") || // Added in 5.0
      Name.startswith("avx512.cvtb2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtw2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtd2mask.") || // Added in 7.0
      Name.startswith("avx512.cvtq2mask.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermilvar.") || // Added in 4.0
      Name.startswith("avx512.mask.psll.d") || // Added in 4.0
      Name.startswith("avx512.mask.psll.q") || // Added in 4.0
      Name.startswith("avx512.mask.psll.w") || // Added in 4.0
      Name.startswith("avx512.mask.psra.d") || // Added in 4.0
      Name.startswith("avx512.mask.psra.q") || // Added in 4.0
      Name.startswith("avx512.mask.psra.w") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.d") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.q") || // Added in 4.0
      Name.startswith("avx512.mask.psrl.w") || // Added in 4.0
      Name.startswith("avx512.mask.pslli") || // Added in 4.0
      Name.startswith("avx512.mask.psrai") || // Added in 4.0
      Name.startswith("avx512.mask.psrli") || // Added in 4.0
      Name.startswith("avx512.mask.psllv") || // Added in 4.0
      Name.startswith("avx512.mask.psrav") || // Added in 4.0
      Name.startswith("avx512.mask.psrlv") || // Added in 4.0
      Name.startswith("sse41.pmovsx") || // Added in 3.8
      Name.startswith("sse41.pmovzx") || // Added in 3.9
      Name.startswith("avx2.pmovsx") || // Added in 3.9
      Name.startswith("avx2.pmovzx") || // Added in 3.9
      Name.startswith("avx512.mask.pmovsx") || // Added in 4.0
      Name.startswith("avx512.mask.pmovzx") || // Added in 4.0
      Name.startswith("avx512.mask.lzcnt.") || // Added in 5.0
      Name.startswith("avx512.mask.pternlog.") || // Added in 7.0
      Name.startswith("avx512.maskz.pternlog.") || // Added in 7.0
      Name.startswith("avx512.mask.vpmadd52") || // Added in 7.0
      Name.startswith("avx512.maskz.vpmadd52") || // Added in 7.0
      Name.startswith("avx512.mask.vpermi2var.") || // Added in 7.0
      Name.startswith("avx512.mask.vpermt2var.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpermt2var.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpbusd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpbusd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpbusds.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpbusds.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpwssd.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpwssd.") || // Added in 7.0
      Name.startswith("avx512.mask.vpdpwssds.") || // Added in 7.0
      Name.startswith("avx512.maskz.vpdpwssds.") || // Added in 7.0
      Name.startswith("avx512.mask.dbpsadbw.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshld.") || // Added in 7.0
      Name.startswith("avx512.mask.vpshrd.") || // Added in 7.0
      Name.startswith("avx512.mask.add.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.sub.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.mul.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.div.p") || // Added in 7.0. 128/256 in 4.0
      Name.startswith("avx512.mask.max.p") || // Added in 7.0. 128/256 in 5.0
      Name.startswith("avx512.mask.min.p") || // Added in 7.0. 128/256 in 5.0
      Name.startswith("avx512.mask.fpclass.p") || // Added in 7.0
      Name.startswith("avx512.mask.prorv.") || // Added in 7.0
      Name.startswith("avx512.mask.pror.") || // Added in 7.0
      Name.startswith("avx512.mask.prolv.") || // Added in 7.0
      Name.startswith("avx512.mask.prol.") || // Added in 7.0
      Name.startswith("avx512.mask.padds.") || // Added in 8.0
      Name.startswith("avx512.mask.psubs.") || // Added in 8.0
      Name == "sse.cvtsi2ss" || // Added in 7.0
      Name == "sse.cvtsi642ss" || // Added in 7.0
      Name == "sse2.cvtsi2sd" || // Added in 7.0
      Name == "sse2.cvtsi642sd" || // Added in 7.0
      Name == "sse2.cvtss2sd" || // Added in 7.0
      Name == "sse2.cvtdq2pd" || // Added in 3.9
      Name == "sse2.cvtdq2ps" || // Added in 7.0
      Name == "sse2.cvtps2pd" || // Added in 3.9
      Name == "avx.cvtdq2.pd.256" || // Added in 3.9
      Name == "avx.cvtdq2.ps.256" || // Added in 7.0
      Name == "avx.cvt.ps2.pd.256" || // Added in 3.9
      Name.startswith("avx.vinsertf128.") || // Added in 3.7
      Name == "avx2.vinserti128" || // Added in 3.7
      Name.startswith("avx512.mask.insert") || // Added in 4.0
      Name.startswith("avx.vextractf128.") || // Added in 3.7
      Name == "avx2.vextracti128" || // Added in 3.7
      Name.startswith("avx512.mask.vextract") || // Added in 4.0
      Name.startswith("sse4a.movnt.") || // Added in 3.9
      Name.startswith("avx.movnt.") || // Added in 3.2
      Name.startswith("avx512.storent.") || // Added in 3.9
      Name == "sse41.movntdqa" || // Added in 5.0
      Name == "avx2.movntdqa" || // Added in 5.0
      Name == "avx512.movntdqa" || // Added in 5.0
      Name == "sse2.storel.dq" || // Added in 3.9
      Name.startswith("sse.storeu.") || // Added in 3.9
      Name.startswith("sse2.storeu.") || // Added in 3.9
      Name.startswith("avx.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.storeu.") || // Added in 3.9
      Name.startswith("avx512.mask.store.p") || // Added in 3.9
      Name.startswith("avx512.mask.store.b.") || // Added in 3.9
      Name.startswith("avx512.mask.store.w.") || // Added in 3.9
      Name.startswith("avx512.mask.store.d.") || // Added in 3.9
      Name.startswith("avx512.mask.store.q.") || // Added in 3.9
      Name == "avx512.mask.store.ss" || // Added in 7.0
      Name.startswith("avx512.mask.loadu.") || // Added in 3.9
      Name.startswith("avx512.mask.load.") || // Added in 3.9
      Name.startswith("avx512.mask.expand.load.") || // Added in 7.0
      Name.startswith("avx512.mask.compress.store.") || // Added in 7.0
      Name == "sse42.crc32.64.8" || // Added in 3.4
      Name.startswith("avx.vbroadcast.s") || // Added in 3.5
      Name.startswith("avx512.vbroadcast.s") || // Added in 7.0
      Name.startswith("avx512.mask.palignr.") || // Added in 3.9
      Name.startswith("avx512.mask.valign.") || // Added in 4.0
      Name.startswith("sse2.psll.dq") || // Added in 3.7
      Name.startswith("sse2.psrl.dq") || // Added in 3.7
      Name.startswith("avx2.psll.dq") || // Added in 3.7
      Name.startswith("avx2.psrl.dq") || // Added in 3.7
      Name.startswith("avx512.psll.dq") || // Added in 3.9
      Name.startswith("avx512.psrl.dq") || // Added in 3.9
      Name == "sse41.pblendw" || // Added in 3.7
      Name.startswith("sse41.blendp") || // Added in 3.7
      Name.startswith("avx.blend.p") || // Added in 3.7
      Name == "avx2.pblendw" || // Added in 3.7
      Name.startswith("avx2.pblendd.") || // Added in 3.7
      Name.startswith("avx.vbroadcastf128") || // Added in 4.0
      Name == "avx2.vbroadcasti128" || // Added in 3.7
      Name.startswith("avx512.mask.broadcastf") || // Added in 6.0
      Name.startswith("avx512.mask.broadcasti") || // Added in 6.0
      Name == "xop.vpcmov" || // Added in 3.8
      Name == "xop.vpcmov.256" || // Added in 5.0
      Name.startswith("avx512.mask.move.s") || // Added in 4.0
      Name.startswith("avx512.cvtmask2") || // Added in 5.0
      (Name.startswith("xop.vpcom") && // Added in 3.2
       F->arg_size() == 2) ||
      Name.startswith("avx512.ptestm") || // Added in 6.0
      Name.startswith("avx512.ptestnm") || // Added in 6.0
      Name.startswith("sse2.pavg") || // Added in 6.0
      Name.startswith("avx2.pavg") || // Added in 6.0
      Name.startswith("avx512.mask.pavg")) // Added in 6.0
    return true;

  return false;
}

static bool UpgradeX86IntrinsicFunction(Function *F, StringRef Name,
                                        Function *&NewFn) {
  // Only handle intrinsics that start with "x86.".
  if (!Name.startswith("x86."))
    return false;
  // Remove "x86." prefix.
  Name = Name.substr(4);

  if (ShouldUpgradeX86Intrinsic(F, Name)) {
    NewFn = nullptr;
    return true;
  }

  // SSE4.1 ptest functions may have an old signature.
  if (Name.startswith("sse41.ptest")) { // Added in 3.2
    if (Name.substr(11) == "c")
      return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestc, NewFn);
    if (Name.substr(11) == "z")
      return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestz, NewFn);
    if (Name.substr(11) == "nzc")
      return UpgradePTESTIntrinsic(F, Intrinsic::x86_sse41_ptestnzc, NewFn);
  }
  // Several blend and other instructions with masks used the wrong number of
  // bits.
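  // As an illustrative sketch (assumed IR, not taken from this file): an old
  // declaration such as
  //   declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32)
  // gets renamed to ".old" and redirected to the current declaration whose
  // final immediate operand is an i8.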
  if (Name == "sse41.insertps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_insertps,
                                            NewFn);
  if (Name == "sse41.dppd") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dppd,
                                            NewFn);
  if (Name == "sse41.dpps") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_dpps,
                                            NewFn);
  if (Name == "sse41.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_sse41_mpsadbw,
                                            NewFn);
  if (Name == "avx.dp.ps.256") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx_dp_ps_256,
                                            NewFn);
  if (Name == "avx2.mpsadbw") // Added in 3.6
    return UpgradeX86IntrinsicsWith8BitMask(F, Intrinsic::x86_avx2_mpsadbw,
                                            NewFn);

  // frcz.ss/sd may need to have an argument dropped. Added in 3.2
  if (Name.startswith("xop.vfrcz.ss") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_ss);
    return true;
  }
  if (Name.startswith("xop.vfrcz.sd") && F->arg_size() == 2) {
    rename(F);
    NewFn = Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::x86_xop_vfrcz_sd);
    return true;
  }
  // Upgrade any XOP PERMIL2 index operand still using a float/double vector.
  if (Name.startswith("xop.vpermil2")) { // Added in 3.9
    auto Idx = F->getFunctionType()->getParamType(2);
    if (Idx->isFPOrFPVectorTy()) {
      rename(F);
      unsigned IdxSize = Idx->getPrimitiveSizeInBits();
      unsigned EltSize = Idx->getScalarSizeInBits();
      Intrinsic::ID Permil2ID;
      if (EltSize == 64 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd;
      else if (EltSize == 32 && IdxSize == 128)
        Permil2ID = Intrinsic::x86_xop_vpermil2ps;
      else if (EltSize == 64 && IdxSize == 256)
        Permil2ID = Intrinsic::x86_xop_vpermil2pd_256;
      else
        Permil2ID = Intrinsic::x86_xop_vpermil2ps_256;
      NewFn = Intrinsic::getDeclaration(F->getParent(), Permil2ID);
      return true;
    }
  }

  return false;
}

static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
  assert(F && "Illegal to upgrade a non-existent Function.");

  // Quickly eliminate it, if it's not a candidate.
  StringRef Name = F->getName();
  if (Name.size() <= 8 || !Name.startswith("llvm."))
    return false;
  Name = Name.substr(5); // Strip off "llvm."

  switch (Name[0]) {
  default: break;
  case 'a': {
    if (Name.startswith("arm.rbit") || Name.startswith("aarch64.rbit")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::bitreverse,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("arm.neon.vclz")) {
      Type* args[2] = {
        F->arg_begin()->getType(),
        Type::getInt1Ty(F->getContext())
      };
      // Can't use Intrinsic::getDeclaration here as it adds a ".i1" to
      // the end of the name. Change name from llvm.arm.neon.vclz.* to
      // llvm.ctlz.*
      FunctionType* fType = FunctionType::get(F->getReturnType(), args, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm.ctlz." + Name.substr(14), F->getParent());
      return true;
    }
    if (Name.startswith("arm.neon.vcnt")) {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop,
                                        F->arg_begin()->getType());
      return true;
    }
    Regex vldRegex("^arm\\.neon\\.vld([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vldRegex.match(Name)) {
      auto fArgs = F->getFunctionType()->params();
      SmallVector<Type *, 4> Tys(fArgs.begin(), fArgs.end());
      // Can't use Intrinsic::getDeclaration here as the return types might
      // then only be structurally equal.
      FunctionType* fType = FunctionType::get(F->getReturnType(), Tys, false);
      NewFn = Function::Create(fType, F->getLinkage(), F->getAddressSpace(),
                               "llvm." + Name + ".p0i8", F->getParent());
      return true;
    }
    Regex vstRegex("^arm\\.neon\\.vst([1234]|[234]lane)\\.v[a-z0-9]*$");
    if (vstRegex.match(Name)) {
      static const Intrinsic::ID StoreInts[] = {Intrinsic::arm_neon_vst1,
                                                Intrinsic::arm_neon_vst2,
                                                Intrinsic::arm_neon_vst3,
                                                Intrinsic::arm_neon_vst4};

      static const Intrinsic::ID StoreLaneInts[] = {
        Intrinsic::arm_neon_vst2lane, Intrinsic::arm_neon_vst3lane,
        Intrinsic::arm_neon_vst4lane
      };

      auto fArgs = F->getFunctionType()->params();
      Type *Tys[] = {fArgs[0], fArgs[1]};
      if (Name.find("lane") == StringRef::npos)
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreInts[fArgs.size() - 3], Tys);
      else
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          StoreLaneInts[fArgs.size() - 5], Tys);
      return true;
    }
    if (Name == "aarch64.thread.pointer" || Name == "arm.thread.pointer") {
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::thread_pointer);
      return true;
    }
    break;
  }

  case 'c': {
    if (Name.startswith("ctlz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz,
                                        F->arg_begin()->getType());
      return true;
    }
    if (Name.startswith("cttz.") && F->arg_size() == 1) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::cttz,
                                        F->arg_begin()->getType());
      return true;
    }
    break;
  }
  case 'd': {
    if (Name == "dbg.value" && F->arg_size() == 4) {
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::dbg_value);
      return true;
    }
    break;
  }
  case 'i':
  case 'l': {
    bool IsLifetimeStart = Name.startswith("lifetime.start");
    if (IsLifetimeStart || Name.startswith("invariant.start")) {
      Intrinsic::ID ID = IsLifetimeStart ?
        Intrinsic::lifetime_start : Intrinsic::invariant_start;
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[1]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }

    bool IsLifetimeEnd = Name.startswith("lifetime.end");
    if (IsLifetimeEnd || Name.startswith("invariant.end")) {
      Intrinsic::ID ID = IsLifetimeEnd ?
        Intrinsic::lifetime_end : Intrinsic::invariant_end;

      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[IsLifetimeEnd ? 1 : 2]};
      if (F->getName() != Intrinsic::getName(ID, ObjectPtr)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), ID, ObjectPtr);
        return true;
      }
    }
    if (Name.startswith("invariant.group.barrier")) {
      // Rename invariant.group.barrier to launder.invariant.group
      auto Args = F->getFunctionType()->params();
      Type* ObjectPtr[1] = {Args[0]};
      rename(F);
      NewFn = Intrinsic::getDeclaration(F->getParent(),
          Intrinsic::launder_invariant_group, ObjectPtr);
      return true;
    }

    break;
  }
  case 'm': {
    if (Name.startswith("masked.load.")) {
      Type *Tys[] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_load, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_load,
                                          Tys);
        return true;
      }
    }
    if (Name.startswith("masked.store.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = { Args[0], Args[1] };
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_store, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_store,
                                          Tys);
        return true;
      }
    }
    // Renaming gather/scatter intrinsics with no address space overloading
    // to the new overload which includes an address space
    if (Name.startswith("masked.gather.")) {
      Type *Tys[] = {F->getReturnType(), F->arg_begin()->getType()};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_gather, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_gather, Tys);
        return true;
      }
    }
    if (Name.startswith("masked.scatter.")) {
      auto Args = F->getFunctionType()->params();
      Type *Tys[] = {Args[0], Args[1]};
      if (F->getName() != Intrinsic::getName(Intrinsic::masked_scatter, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(),
                                          Intrinsic::masked_scatter, Tys);
        return true;
      }
    }
    // Updating the memory intrinsics (memcpy/memmove/memset) that have an
    // alignment parameter to embedding the alignment as an attribute of
    // the pointer args.
    if (Name.startswith("memcpy.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memmove.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, src, and len
      ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
                                        ParamTypes);
      return true;
    }
    if (Name.startswith("memset.") && F->arg_size() == 5) {
      rename(F);
      // Get the types of dest, and len
      const auto *FT = F->getFunctionType();
      Type *ParamTypes[2] = {
          FT->getParamType(0), // Dest
          FT->getParamType(2)  // len
      };
      NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
                                        ParamTypes);
      return true;
    }
    break;
  }
  case 'n': {
    if (Name.startswith("nvvm.")) {
      Name = Name.substr(5);

      // The following nvvm intrinsics correspond exactly to an LLVM intrinsic.
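      // For example (a hedged sketch, assuming an i32 overload): a 1-operand
      // declaration of llvm.nvvm.brev32 maps directly onto
      // llvm.bitreverse.i32, overloaded on the return type as done below.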
      Intrinsic::ID IID = StringSwitch<Intrinsic::ID>(Name)
                              .Cases("brev32", "brev64", Intrinsic::bitreverse)
                              .Case("clz.i", Intrinsic::ctlz)
                              .Case("popc.i", Intrinsic::ctpop)
                              .Default(Intrinsic::not_intrinsic);
      if (IID != Intrinsic::not_intrinsic && F->arg_size() == 1) {
        NewFn = Intrinsic::getDeclaration(F->getParent(), IID,
                                          {F->getReturnType()});
        return true;
      }

      // The following nvvm intrinsics correspond exactly to an LLVM idiom, but
      // not to an intrinsic alone. We expand them in UpgradeIntrinsicCall.
      //
      // TODO: We could add lohi.i2d.
      bool Expand = StringSwitch<bool>(Name)
                        .Cases("abs.i", "abs.ll", true)
                        .Cases("clz.ll", "popc.ll", "h2f", true)
                        .Cases("max.i", "max.ll", "max.ui", "max.ull", true)
                        .Cases("min.i", "min.ll", "min.ui", "min.ull", true)
                        .Default(false);
      if (Expand) {
        NewFn = nullptr;
        return true;
      }
    }
    break;
  }
  case 'o':
    // We only need to change the name to match the mangling including the
    // address space.
    if (Name.startswith("objectsize.")) {
      Type *Tys[2] = { F->getReturnType(), F->arg_begin()->getType() };
      if (F->arg_size() == 2 ||
          F->getName() != Intrinsic::getName(Intrinsic::objectsize, Tys)) {
        rename(F);
        NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
                                          Tys);
        return true;
      }
    }
    break;

  case 's':
    if (Name == "stackprotectorcheck") {
      NewFn = nullptr;
      return true;
    }
    break;

  case 'x':
    if (UpgradeX86IntrinsicFunction(F, Name, NewFn))
      return true;
  }
  // Remangle our intrinsic since we upgrade the mangling
  auto Result = llvm::Intrinsic::remangleIntrinsicFunction(F);
  if (Result != None) {
    NewFn = Result.getValue();
    return true;
  }

  // This may not belong here. This function is effectively being overloaded
  // to both detect an intrinsic which needs upgrading, and to provide the
  // upgraded form of the intrinsic. We should perhaps have two separate
  // functions for this.
  return false;
}

bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
  NewFn = nullptr;
  bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
  assert(F != NewFn && "Intrinsic function upgraded to the same function");

  // Upgrade intrinsic attributes. This does not change the function.
  if (NewFn)
    F = NewFn;
  if (Intrinsic::ID id = F->getIntrinsicID())
    F->setAttributes(Intrinsic::getAttributes(F->getContext(), id));
  return Upgraded;
}

bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
  // Nothing to do yet.
  return false;
}

// Handles upgrading SSE2/AVX2/AVX512BW PSLLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSLLDQIntrinsics(IRBuilder<> &Builder,
                                         Value *Op, unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = NumElts + i - Shift;
        if (Idx < NumElts)
          Idx -= NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Res, Op, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

// Handles upgrading SSE2/AVX2/AVX512BW PSRLDQ intrinsics by converting them
// to byte shuffles.
static Value *UpgradeX86PSRLDQIntrinsics(IRBuilder<> &Builder, Value *Op,
                                         unsigned Shift) {
  Type *ResultTy = Op->getType();
  unsigned NumElts = ResultTy->getVectorNumElements() * 8;

  // Bitcast from a 64-bit element type to a byte element type.
  Type *VecTy = VectorType::get(Builder.getInt8Ty(), NumElts);
  Op = Builder.CreateBitCast(Op, VecTy, "cast");

  // We'll be shuffling in zeroes.
  Value *Res = Constant::getNullValue(VecTy);

  // If shift is less than 16, emit a shuffle to move the bytes. Otherwise,
  // we'll just return the zero vector.
  if (Shift < 16) {
    uint32_t Idxs[64];
    // 256/512-bit version is split into 2/4 16-byte lanes.
    for (unsigned l = 0; l != NumElts; l += 16)
      for (unsigned i = 0; i != 16; ++i) {
        unsigned Idx = i + Shift;
        if (Idx >= 16)
          Idx += NumElts - 16; // end of lane, switch operand.
        Idxs[l + i] = Idx + l;
      }

    Res = Builder.CreateShuffleVector(Op, Res, makeArrayRef(Idxs, NumElts));
  }

  // Bitcast back to a 64-bit element type.
  return Builder.CreateBitCast(Res, ResultTy, "cast");
}

static Value *getX86MaskVec(IRBuilder<> &Builder, Value *Mask,
                            unsigned NumElts) {
  llvm::VectorType *MaskTy = llvm::VectorType::get(Builder.getInt1Ty(),
                     cast<IntegerType>(Mask->getType())->getBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);

  // If we have less than 8 elements, then the starting mask was an i8 and
  // we need to extract down to the right number of elements.
  if (NumElts < 8) {
    uint32_t Indices[4];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    Mask = Builder.CreateShuffleVector(Mask, Mask,
                                       makeArrayRef(Indices, NumElts),
                                       "extract");
  }

  return Mask;
}

static Value *EmitX86Select(IRBuilder<> &Builder, Value *Mask,
                            Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements());
  return Builder.CreateSelect(Mask, Op0, Op1);
}

static Value *EmitX86ScalarSelect(IRBuilder<> &Builder, Value *Mask,
                                  Value *Op0, Value *Op1) {
  // If the mask is all ones just emit the first operation.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Op0;

  llvm::VectorType *MaskTy =
    llvm::VectorType::get(Builder.getInt1Ty(),
                          Mask->getType()->getIntegerBitWidth());
  Mask = Builder.CreateBitCast(Mask, MaskTy);
  Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
  return Builder.CreateSelect(Mask, Op0, Op1);
}

// Handle autoupgrade for masked PALIGNR and VALIGND/Q intrinsics.
// PALIGNR handles large immediates by shifting while VALIGN masks the immediate
// so we need to handle both cases. VALIGN also doesn't have 128-bit lanes.
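// A minimal sketch of the expansion (assumed operand names, not from the
// original source): a 128-bit palignr of %a and %b with an immediate of 4
// becomes a single byte shuffle,
//   shufflevector(<16 x i8> %b, <16 x i8> %a, <4, 5, ..., 19>)
// i.e. bytes [4, 19] of the 32-byte concatenation with %b in the low half and
// %a in the high half, which is then run through EmitX86Select to honor the
// mask.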
static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0,
                                        Value *Op1, Value *Shift,
                                        Value *Passthru, Value *Mask,
                                        bool IsVALIGN) {
  unsigned ShiftVal = cast<llvm::ConstantInt>(Shift)->getZExtValue();

  unsigned NumElts = Op0->getType()->getVectorNumElements();
  assert((IsVALIGN || NumElts % 16 == 0) && "Illegal NumElts for PALIGNR!");
  assert((!IsVALIGN || NumElts <= 16) && "NumElts too large for VALIGN!");
  assert(isPowerOf2_32(NumElts) && "NumElts not a power of 2!");

  // Mask the immediate for VALIGN.
  if (IsVALIGN)
    ShiftVal &= (NumElts - 1);

  // If palignr is shifting the pair of vectors more than the size of two
  // lanes, emit zero.
  if (ShiftVal >= 32)
    return llvm::Constant::getNullValue(Op0->getType());

  // If palignr is shifting the pair of input vectors more than one lane,
  // but less than two lanes, convert to shifting in zeroes.
  if (ShiftVal > 16) {
    ShiftVal -= 16;
    Op1 = Op0;
    Op0 = llvm::Constant::getNullValue(Op0->getType());
  }

  uint32_t Indices[64];
  // 256-bit palignr operates on 128-bit lanes so we need to handle that
  for (unsigned l = 0; l < NumElts; l += 16) {
    for (unsigned i = 0; i != 16; ++i) {
      unsigned Idx = ShiftVal + i;
      if (!IsVALIGN && Idx >= 16) // Disable wrap for VALIGN.
        Idx += NumElts - 16; // End of lane, switch operand.
      Indices[l + i] = Idx + l;
    }
  }

  Value *Align = Builder.CreateShuffleVector(Op1, Op0,
                                             makeArrayRef(Indices, NumElts),
                                             "palignr");

  return EmitX86Select(Builder, Mask, Align, Passthru);
}

static Value *UpgradeX86AddSubSatIntrinsics(IRBuilder<> &Builder, CallInst &CI,
                                            bool IsAddition) {
  Value *Op0 = CI.getOperand(0);
  Value *Op1 = CI.getOperand(1);

  // Collect vector elements and type data.
  Type *ResultType = CI.getType();

  Value *Res;
  if (IsAddition) {
    // ADDUS: a > (a+b) ? ~0 : (a+b)
    // If Op0 > Add, overflow occurred.
    Value *Add = Builder.CreateAdd(Op0, Op1);
    Value *ICmp = Builder.CreateICmp(ICmpInst::ICMP_UGT, Op0, Add);
    Value *Max = llvm::Constant::getAllOnesValue(ResultType);
    Res = Builder.CreateSelect(ICmp, Max, Add);
  } else {
    // SUBUS: max(a, b) - b
    Value *ICmp = Builder.CreateICmp(ICmpInst::ICMP_UGT, Op0, Op1);
    Value *Select = Builder.CreateSelect(ICmp, Op0, Op1);
    Res = Builder.CreateSub(Select, Op1);
  }

  if (CI.getNumArgOperands() == 4) { // For masked intrinsics.
    Value *VecSrc = CI.getOperand(2);
    Value *Mask = CI.getOperand(3);
    Res = EmitX86Select(Builder, Mask, Res, VecSrc);
  }
  return Res;
}

static Value *UpgradeMaskedStore(IRBuilder<> &Builder,
                                 Value *Ptr, Value *Data, Value *Mask,
                                 bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Data->getType()));
  unsigned Align =
    Aligned ? cast<VectorType>(Data->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular store.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedStore(Data, Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Data->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedStore(Data, Ptr, Align, Mask);
}

static Value *UpgradeMaskedLoad(IRBuilder<> &Builder,
                                Value *Ptr, Value *Passthru, Value *Mask,
                                bool Aligned) {
  // Cast the pointer to the right type.
  Ptr = Builder.CreateBitCast(Ptr,
                              llvm::PointerType::getUnqual(Passthru->getType()));
  unsigned Align =
    Aligned ? cast<VectorType>(Passthru->getType())->getBitWidth() / 8 : 1;

  // If the mask is all ones just emit a regular load.
  if (const auto *C = dyn_cast<Constant>(Mask))
    if (C->isAllOnesValue())
      return Builder.CreateAlignedLoad(Ptr, Align);

  // Convert the mask from an integer type to a vector of i1.
  unsigned NumElts = Passthru->getType()->getVectorNumElements();
  Mask = getX86MaskVec(Builder, Mask, NumElts);
  return Builder.CreateMaskedLoad(Ptr, Align, Mask, Passthru);
}

static Value *upgradeAbs(IRBuilder<> &Builder, CallInst &CI) {
  Value *Op0 = CI.getArgOperand(0);
  llvm::Type *Ty = Op0->getType();
  Value *Zero = llvm::Constant::getNullValue(Ty);
  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Op0, Zero);
  Value *Neg = Builder.CreateNeg(Op0);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Neg);

  if (CI.getNumArgOperands() == 3)
    Res = EmitX86Select(Builder,CI.getArgOperand(2), Res, CI.getArgOperand(1));

  return Res;
}

static Value *upgradeIntMinMax(IRBuilder<> &Builder, CallInst &CI,
                               ICmpInst::Predicate Pred) {
  Value *Op0 = CI.getArgOperand(0);
  Value *Op1 = CI.getArgOperand(1);
  Value *Cmp = Builder.CreateICmp(Pred, Op0, Op1);
  Value *Res = Builder.CreateSelect(Cmp, Op0, Op1);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

static Value *upgradePMULDQ(IRBuilder<> &Builder, CallInst &CI, bool IsSigned) {
  Type *Ty = CI.getType();

  // Arguments have a vXi32 type so cast to vXi64.
  Value *LHS = Builder.CreateBitCast(CI.getArgOperand(0), Ty);
  Value *RHS = Builder.CreateBitCast(CI.getArgOperand(1), Ty);

  if (IsSigned) {
    // Shift left then arithmetic shift right.
    Constant *ShiftAmt = ConstantInt::get(Ty, 32);
    LHS = Builder.CreateShl(LHS, ShiftAmt);
    LHS = Builder.CreateAShr(LHS, ShiftAmt);
    RHS = Builder.CreateShl(RHS, ShiftAmt);
    RHS = Builder.CreateAShr(RHS, ShiftAmt);
  } else {
    // Clear the upper bits.
    Constant *Mask = ConstantInt::get(Ty, 0xffffffff);
    LHS = Builder.CreateAnd(LHS, Mask);
    RHS = Builder.CreateAnd(RHS, Mask);
  }

  Value *Res = Builder.CreateMul(LHS, RHS);

  if (CI.getNumArgOperands() == 4)
    Res = EmitX86Select(Builder, CI.getArgOperand(3), Res, CI.getArgOperand(2));

  return Res;
}

// Apply a mask to a vector of i1s and make sure the result is at least 8 bits
// wide.
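// A hedged sketch of the widening (assumed shapes): a <4 x i1> compare result
// combined with a 4-bit mask becomes roughly
//   %v = and <4 x i1> %cmp, %maskvec
//   %w = shufflevector <4 x i1> %v, <4 x i1> zeroinitializer,
//                      <0, 1, 2, 3, 4, 5, 6, 7>
//   %r = bitcast <8 x i1> %w to i8
// so the scalar result is always at least 8 bits wide.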
static Value *ApplyX86MaskOn1BitsVec(IRBuilder<> &Builder, Value *Vec,
                                     Value *Mask) {
  unsigned NumElts = Vec->getType()->getVectorNumElements();
  if (Mask) {
    const auto *C = dyn_cast<Constant>(Mask);
    if (!C || !C->isAllOnesValue())
      Vec = Builder.CreateAnd(Vec, getX86MaskVec(Builder, Mask, NumElts));
  }

  if (NumElts < 8) {
    uint32_t Indices[8];
    for (unsigned i = 0; i != NumElts; ++i)
      Indices[i] = i;
    for (unsigned i = NumElts; i != 8; ++i)
      Indices[i] = NumElts + i % NumElts;
    Vec = Builder.CreateShuffleVector(Vec,
                                      Constant::getNullValue(Vec->getType()),
                                      Indices);
  }
  return Builder.CreateBitCast(Vec, Builder.getIntNTy(std::max(NumElts, 8U)));
}

static Value *upgradeMaskedCompare(IRBuilder<> &Builder, CallInst &CI,
                                   unsigned CC, bool Signed) {
  Value *Op0 = CI.getArgOperand(0);
  unsigned NumElts = Op0->getType()->getVectorNumElements();

  Value *Cmp;
  if (CC == 3) {
    Cmp = Constant::getNullValue(llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else if (CC == 7) {
    Cmp = Constant::getAllOnesValue(llvm::VectorType::get(Builder.getInt1Ty(), NumElts));
  } else {
    ICmpInst::Predicate Pred;
    switch (CC) {
    default: llvm_unreachable("Unknown condition code");
    case 0: Pred = ICmpInst::ICMP_EQ;  break;
    case 1: Pred = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; break;
    case 2: Pred = Signed ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE; break;
    case 4: Pred = ICmpInst::ICMP_NE;  break;
    case 5: Pred = Signed ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE; break;
    case 6: Pred = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; break;
    }
    Cmp = Builder.CreateICmp(Pred, Op0, CI.getArgOperand(1));
  }

  Value *Mask = CI.getArgOperand(CI.getNumArgOperands() - 1);

  return ApplyX86MaskOn1BitsVec(Builder, Cmp, Mask);
}

// Replace a masked intrinsic with an older unmasked intrinsic.
static Value *UpgradeX86MaskedShift(IRBuilder<> &Builder, CallInst &CI,
                                    Intrinsic::ID IID) {
  Function *Intrin = Intrinsic::getDeclaration(CI.getModule(), IID);
  Value *Rep = Builder.CreateCall(Intrin,
                                 { CI.getArgOperand(0), CI.getArgOperand(1) });
  return EmitX86Select(Builder, CI.getArgOperand(3), Rep, CI.getArgOperand(2));
}

static Value* upgradeMaskedMove(IRBuilder<> &Builder, CallInst &CI) {
  Value* A = CI.getArgOperand(0);
  Value* B = CI.getArgOperand(1);
  Value* Src = CI.getArgOperand(2);
  Value* Mask = CI.getArgOperand(3);

  Value* AndNode = Builder.CreateAnd(Mask, APInt(8, 1));
  Value* Cmp = Builder.CreateIsNotNull(AndNode);
  Value* Extract1 = Builder.CreateExtractElement(B, (uint64_t)0);
  Value* Extract2 = Builder.CreateExtractElement(Src, (uint64_t)0);
  Value* Select = Builder.CreateSelect(Cmp, Extract1, Extract2);
  return Builder.CreateInsertElement(A, Select, (uint64_t)0);
}

static Value* UpgradeMaskToInt(IRBuilder<> &Builder, CallInst &CI) {
  Value* Op = CI.getArgOperand(0);
  Type* ReturnOp = CI.getType();
  unsigned NumElts = CI.getType()->getVectorNumElements();
  Value *Mask = getX86MaskVec(Builder, Op, NumElts);
  return Builder.CreateSExt(Mask, ReturnOp, "vpmovm2");
}

// Replace intrinsic with unmasked version and a select.
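// For instance (an illustrative sketch with assumed operand names): a call to
// llvm.x86.avx512.mask.max.ps.128(%a, %b, %passthru, i8 %m) is rebuilt as
//   %r = call <4 x float> @llvm.x86.sse.max.ps(%a, %b)
//   select(<4 x i1> from %m, %r, %passthru)
// with the trailing passthru and mask operands dropped from the unmasked call.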
static bool upgradeAVX512MaskToSelect(StringRef Name, IRBuilder<> &Builder,
                                      CallInst &CI, Value *&Rep) {
  Name = Name.substr(12); // Remove avx512.mask.

  unsigned VecWidth = CI.getType()->getPrimitiveSizeInBits();
  unsigned EltWidth = CI.getType()->getScalarSizeInBits();
  Intrinsic::ID IID;
  if (Name.startswith("max.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_max_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_max_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_max_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_max_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("min.p")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_sse_min_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_sse2_min_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_min_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_min_pd_256;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pshuf.b.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pshuf_b_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pshuf_b;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pshuf_b_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmul.hr.sw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmul_hr_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmul_hr_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmul_hr_sw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulh.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulh_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulh_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulh_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmulhu.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmulhu_w;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmulhu_w;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmulhu_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddw.d.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_pmadd_wd;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_wd;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddw_d_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pmaddubs.w.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_ssse3_pmadd_ub_sw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_pmadd_ub_sw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_pmaddubs_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packsswb.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_packsswb_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packsswb;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packsswb_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packssdw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_packssdw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packssdw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packssdw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packuswb.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse2_packuswb_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packuswb;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packuswb_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("packusdw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_sse41_packusdw;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx2_packusdw;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_packusdw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("vpermilvar.")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx_vpermilvar_ps;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx_vpermilvar_pd;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx_vpermilvar_ps_256;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx_vpermilvar_pd_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_vpermilvar_ps_512;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_vpermilvar_pd_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name == "cvtpd2dq.256") {
    IID = Intrinsic::x86_avx_cvt_pd2dq_256;
  } else if (Name == "cvtpd2ps.256") {
    IID = Intrinsic::x86_avx_cvt_pd2_ps_256;
  } else if (Name == "cvttpd2dq.256") {
    IID = Intrinsic::x86_avx_cvtt_pd2dq_256;
  } else if (Name == "cvttps2dq.128") {
    IID = Intrinsic::x86_sse2_cvttps2dq;
  } else if (Name == "cvttps2dq.256") {
    IID = Intrinsic::x86_avx_cvtt_ps2dq_256;
  } else if (Name.startswith("permvar.")) {
    bool IsFloat = CI.getType()->isFPOrFPVectorTy();
    if (VecWidth == 256 && EltWidth == 32 && IsFloat)
      IID = Intrinsic::x86_avx2_permps;
    else if (VecWidth == 256 && EltWidth == 32 && !IsFloat)
      IID = Intrinsic::x86_avx2_permd;
    else if (VecWidth == 256 && EltWidth == 64 && IsFloat)
      IID = Intrinsic::x86_avx512_permvar_df_256;
    else if (VecWidth == 256 && EltWidth == 64 && !IsFloat)
      IID = Intrinsic::x86_avx512_permvar_di_256;
    else if (VecWidth == 512 && EltWidth == 32 && IsFloat)
      IID = Intrinsic::x86_avx512_permvar_sf_512;
    else if (VecWidth == 512 && EltWidth == 32 && !IsFloat)
      IID = Intrinsic::x86_avx512_permvar_si_512;
    else if (VecWidth == 512 && EltWidth == 64 && IsFloat)
      IID = Intrinsic::x86_avx512_permvar_df_512;
    else if (VecWidth == 512 && EltWidth == 64 && !IsFloat)
      IID = Intrinsic::x86_avx512_permvar_di_512;
    else if (VecWidth == 128 && EltWidth == 16)
      IID = Intrinsic::x86_avx512_permvar_hi_128;
    else if (VecWidth == 256 && EltWidth == 16)
      IID = Intrinsic::x86_avx512_permvar_hi_256;
    else if (VecWidth == 512 && EltWidth == 16)
      IID = Intrinsic::x86_avx512_permvar_hi_512;
    else if (VecWidth == 128 && EltWidth == 8)
      IID = Intrinsic::x86_avx512_permvar_qi_128;
    else if (VecWidth == 256 && EltWidth == 8)
      IID = Intrinsic::x86_avx512_permvar_qi_256;
    else if (VecWidth == 512 && EltWidth == 8)
      IID = Intrinsic::x86_avx512_permvar_qi_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("dbpsadbw.")) {
    if (VecWidth == 128)
      IID = Intrinsic::x86_avx512_dbpsadbw_128;
    else if (VecWidth == 256)
      IID = Intrinsic::x86_avx512_dbpsadbw_256;
    else if (VecWidth == 512)
      IID = Intrinsic::x86_avx512_dbpsadbw_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("vpshld.")) {
    if (VecWidth == 128 && Name[7] == 'q')
      IID = Intrinsic::x86_avx512_vpshld_q_128;
    else if (VecWidth == 128 && Name[7] == 'd')
      IID = Intrinsic::x86_avx512_vpshld_d_128;
    else if (VecWidth == 128 && Name[7] == 'w')
      IID = Intrinsic::x86_avx512_vpshld_w_128;
    else if (VecWidth == 256 && Name[7] == 'q')
      IID = Intrinsic::x86_avx512_vpshld_q_256;
    else if (VecWidth == 256 && Name[7] == 'd')
      IID = Intrinsic::x86_avx512_vpshld_d_256;
    else if (VecWidth == 256 && Name[7] == 'w')
      IID = Intrinsic::x86_avx512_vpshld_w_256;
    else if (VecWidth == 512 && Name[7] == 'q')
      IID = Intrinsic::x86_avx512_vpshld_q_512;
    else if (VecWidth == 512 && Name[7] == 'd')
      IID = Intrinsic::x86_avx512_vpshld_d_512;
    else if (VecWidth == 512 && Name[7] == 'w')
      IID = Intrinsic::x86_avx512_vpshld_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("vpshrd.")) {
    if (VecWidth == 128 && Name[7] == 'q')
      IID = Intrinsic::x86_avx512_vpshrd_q_128;
    else if (VecWidth == 128 && Name[7] == 'd')
      IID = Intrinsic::x86_avx512_vpshrd_d_128;
    else if (VecWidth == 128 && Name[7] == 'w')
      IID = Intrinsic::x86_avx512_vpshrd_w_128;
    else if (VecWidth == 256 && Name[7] == 'q')
      IID = Intrinsic::x86_avx512_vpshrd_q_256;
    else if (VecWidth == 256 && Name[7] == 'd')
      IID = Intrinsic::x86_avx512_vpshrd_d_256;
    else if (VecWidth == 256 && Name[7] == 'w')
      IID = Intrinsic::x86_avx512_vpshrd_w_256;
    else if (VecWidth == 512 && Name[7] == 'q')
      IID = Intrinsic::x86_avx512_vpshrd_q_512;
    else if (VecWidth == 512 && Name[7] == 'd')
      IID = Intrinsic::x86_avx512_vpshrd_d_512;
    else if (VecWidth == 512 && Name[7] == 'w')
      IID = Intrinsic::x86_avx512_vpshrd_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("prorv.")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_prorv_d_128;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_prorv_d_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_prorv_d_512;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_prorv_q_128;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_prorv_q_256;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_prorv_q_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("prolv.")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_prolv_d_128;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_prolv_d_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_prolv_d_512;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_prolv_q_128;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_prolv_q_256;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_prolv_q_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("pror.")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pror_d_128;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pror_d_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_pror_d_512;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pror_q_128;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pror_q_256;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_pror_q_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("prol.")) {
    if (VecWidth == 128 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_prol_d_128;
    else if (VecWidth == 256 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_prol_d_256;
    else if (VecWidth == 512 && EltWidth == 32)
      IID = Intrinsic::x86_avx512_prol_d_512;
    else if (VecWidth == 128 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_prol_q_128;
    else if (VecWidth == 256 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_prol_q_256;
    else if (VecWidth == 512 && EltWidth == 64)
      IID = Intrinsic::x86_avx512_prol_q_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("padds.")) {
    if (VecWidth == 128 && EltWidth == 8)
      IID = Intrinsic::x86_sse2_padds_b;
    else if (VecWidth == 256 && EltWidth == 8)
      IID = Intrinsic::x86_avx2_padds_b;
    else if (VecWidth == 512 && EltWidth == 8)
      IID = Intrinsic::x86_avx512_padds_b_512;
    else if (VecWidth == 128 && EltWidth == 16)
      IID = Intrinsic::x86_sse2_padds_w;
    else if (VecWidth == 256 && EltWidth == 16)
      IID = Intrinsic::x86_avx2_padds_w;
    else if (VecWidth == 512 && EltWidth == 16)
      IID = Intrinsic::x86_avx512_padds_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else if (Name.startswith("psubs.")) {
    if (VecWidth == 128 && EltWidth == 8)
      IID = Intrinsic::x86_sse2_psubs_b;
    else if (VecWidth == 256 && EltWidth == 8)
      IID = Intrinsic::x86_avx2_psubs_b;
    else if (VecWidth == 512 && EltWidth == 8)
      IID = Intrinsic::x86_avx512_psubs_b_512;
    else if (VecWidth == 128 && EltWidth == 16)
      IID = Intrinsic::x86_sse2_psubs_w;
    else if (VecWidth == 256 && EltWidth == 16)
      IID = Intrinsic::x86_avx2_psubs_w;
    else if (VecWidth == 512 && EltWidth == 16)
      IID = Intrinsic::x86_avx512_psubs_w_512;
    else
      llvm_unreachable("Unexpected intrinsic");
  } else
    return false;

  SmallVector<Value *, 4> Args(CI.arg_operands().begin(),
                               CI.arg_operands().end());
  Args.pop_back();
  Args.pop_back();
  Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI.getModule(), IID),
                           Args);
  unsigned NumArgs = CI.getNumArgOperands();
  Rep = EmitX86Select(Builder, CI.getArgOperand(NumArgs - 1), Rep,
                      CI.getArgOperand(NumArgs - 2));
  return true;
}

/// Upgrade the comment in a call to inline asm that represents an ObjC
/// retain/release marker.
1456 void llvm::UpgradeInlineAsmString(std::string *AsmStr) { 1457 size_t Pos; 1458 if (AsmStr->find("mov\tfp") == 0 && 1459 AsmStr->find("objc_retainAutoreleaseReturnValue") != std::string::npos && 1460 (Pos = AsmStr->find("# marker")) != std::string::npos) { 1461 AsmStr->replace(Pos, 1, ";"); 1462 } 1463 return; 1464 } 1465 1466 /// Upgrade a call to an old intrinsic. All argument and return casting must be 1467 /// provided to seamlessly integrate with existing context. 1468 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { 1469 Function *F = CI->getCalledFunction(); 1470 LLVMContext &C = CI->getContext(); 1471 IRBuilder<> Builder(C); 1472 Builder.SetInsertPoint(CI->getParent(), CI->getIterator()); 1473 1474 assert(F && "Intrinsic call is not direct?"); 1475 1476 if (!NewFn) { 1477 // Get the Function's name. 1478 StringRef Name = F->getName(); 1479 1480 assert(Name.startswith("llvm.") && "Intrinsic doesn't start with 'llvm.'"); 1481 Name = Name.substr(5); 1482 1483 bool IsX86 = Name.startswith("x86."); 1484 if (IsX86) 1485 Name = Name.substr(4); 1486 bool IsNVVM = Name.startswith("nvvm."); 1487 if (IsNVVM) 1488 Name = Name.substr(5); 1489 1490 if (IsX86 && Name.startswith("sse4a.movnt.")) { 1491 Module *M = F->getParent(); 1492 SmallVector<Metadata *, 1> Elts; 1493 Elts.push_back( 1494 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1495 MDNode *Node = MDNode::get(C, Elts); 1496 1497 Value *Arg0 = CI->getArgOperand(0); 1498 Value *Arg1 = CI->getArgOperand(1); 1499 1500 // Nontemporal (unaligned) store of the 0'th element of the float/double 1501 // vector. 1502 Type *SrcEltTy = cast<VectorType>(Arg1->getType())->getElementType(); 1503 PointerType *EltPtrTy = PointerType::getUnqual(SrcEltTy); 1504 Value *Addr = Builder.CreateBitCast(Arg0, EltPtrTy, "cast"); 1505 Value *Extract = 1506 Builder.CreateExtractElement(Arg1, (uint64_t)0, "extractelement"); 1507 1508 StoreInst *SI = Builder.CreateAlignedStore(Extract, Addr, 1); 1509 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1510 1511 // Remove intrinsic. 1512 CI->eraseFromParent(); 1513 return; 1514 } 1515 1516 if (IsX86 && (Name.startswith("avx.movnt.") || 1517 Name.startswith("avx512.storent."))) { 1518 Module *M = F->getParent(); 1519 SmallVector<Metadata *, 1> Elts; 1520 Elts.push_back( 1521 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 1522 MDNode *Node = MDNode::get(C, Elts); 1523 1524 Value *Arg0 = CI->getArgOperand(0); 1525 Value *Arg1 = CI->getArgOperand(1); 1526 1527 // Convert the type of the pointer to a pointer to the stored type. 1528 Value *BC = Builder.CreateBitCast(Arg0, 1529 PointerType::getUnqual(Arg1->getType()), 1530 "cast"); 1531 VectorType *VTy = cast<VectorType>(Arg1->getType()); 1532 StoreInst *SI = Builder.CreateAlignedStore(Arg1, BC, 1533 VTy->getBitWidth() / 8); 1534 SI->setMetadata(M->getMDKindID("nontemporal"), Node); 1535 1536 // Remove intrinsic. 1537 CI->eraseFromParent(); 1538 return; 1539 } 1540 1541 if (IsX86 && Name == "sse2.storel.dq") { 1542 Value *Arg0 = CI->getArgOperand(0); 1543 Value *Arg1 = CI->getArgOperand(1); 1544 1545 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 1546 Value *BC0 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 1547 Value *Elt = Builder.CreateExtractElement(BC0, (uint64_t)0); 1548 Value *BC = Builder.CreateBitCast(Arg0, 1549 PointerType::getUnqual(Elt->getType()), 1550 "cast"); 1551 Builder.CreateAlignedStore(Elt, BC, 1); 1552 1553 // Remove intrinsic. 
      CI->eraseFromParent();
      return;
    }

    if (IsX86 && (Name.startswith("sse.storeu.") ||
                  Name.startswith("sse2.storeu.") ||
                  Name.startswith("avx.storeu."))) {
      Value *Arg0 = CI->getArgOperand(0);
      Value *Arg1 = CI->getArgOperand(1);

      Arg0 = Builder.CreateBitCast(Arg0,
                                   PointerType::getUnqual(Arg1->getType()),
                                   "cast");
      Builder.CreateAlignedStore(Arg1, Arg0, 1);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    }

    if (IsX86 && Name == "avx512.mask.store.ss") {
      Value *Mask = Builder.CreateAnd(CI->getArgOperand(2), Builder.getInt8(1));
      UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
                         Mask, false);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    }

    if (IsX86 && (Name.startswith("avx512.mask.store"))) {
      // "avx512.mask.storeu." or "avx512.mask.store."
      bool Aligned = Name[17] != 'u'; // "avx512.mask.storeu".
      UpgradeMaskedStore(Builder, CI->getArgOperand(0), CI->getArgOperand(1),
                         CI->getArgOperand(2), Aligned);

      // Remove intrinsic.
      CI->eraseFromParent();
      return;
    }

    Value *Rep;
    // Upgrade packed integer vector compare intrinsics to compare instructions.
    if (IsX86 && (Name.startswith("sse2.pcmp") ||
                  Name.startswith("avx2.pcmp"))) {
      // "sse2.pcmpeq." "sse2.pcmpgt." "avx2.pcmpeq." or "avx2.pcmpgt."
      bool CmpEq = Name[9] == 'e';
      Rep = Builder.CreateICmp(CmpEq ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_SGT,
                               CI->getArgOperand(0), CI->getArgOperand(1));
      Rep = Builder.CreateSExt(Rep, CI->getType(), "");
    } else if (IsX86 && (Name.startswith("avx512.broadcastm"))) {
      Type *ExtTy = Type::getInt32Ty(C);
      if (CI->getOperand(0)->getType()->isIntegerTy(8))
        ExtTy = Type::getInt64Ty(C);
      unsigned NumElts = CI->getType()->getPrimitiveSizeInBits() /
                         ExtTy->getPrimitiveSizeInBits();
      Rep = Builder.CreateZExt(CI->getArgOperand(0), ExtTy);
      Rep = Builder.CreateVectorSplat(NumElts, Rep);
    } else if (IsX86 && (Name == "sse.sqrt.ss" ||
                         Name == "sse2.sqrt.sd")) {
      Value *Vec = CI->getArgOperand(0);
      Value *Elt0 = Builder.CreateExtractElement(Vec, (uint64_t)0);
      Function *Intr = Intrinsic::getDeclaration(F->getParent(),
                                                 Intrinsic::sqrt, Elt0->getType());
      Elt0 = Builder.CreateCall(Intr, Elt0);
      Rep = Builder.CreateInsertElement(Vec, Elt0, (uint64_t)0);
    } else if (IsX86 && (Name.startswith("avx.sqrt.p") ||
                         Name.startswith("sse2.sqrt.p") ||
                         Name.startswith("sse.sqrt.p"))) {
      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(),
                                                         Intrinsic::sqrt,
                                                         CI->getType()),
                               {CI->getArgOperand(0)});
    } else if (IsX86 && (Name.startswith("avx512.mask.sqrt.p"))) {
      if (CI->getNumArgOperands() == 4 &&
          (!isa<ConstantInt>(CI->getArgOperand(3)) ||
           cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue() != 4)) {
        Intrinsic::ID IID = Name[18] == 's' ?
Intrinsic::x86_avx512_sqrt_ps_512 1632 : Intrinsic::x86_avx512_sqrt_pd_512; 1633 1634 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(3) }; 1635 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 1636 IID), Args); 1637 } else { 1638 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 1639 Intrinsic::sqrt, 1640 CI->getType()), 1641 {CI->getArgOperand(0)}); 1642 } 1643 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1644 CI->getArgOperand(1)); 1645 } else if (IsX86 && (Name.startswith("avx512.ptestm") || 1646 Name.startswith("avx512.ptestnm"))) { 1647 Value *Op0 = CI->getArgOperand(0); 1648 Value *Op1 = CI->getArgOperand(1); 1649 Value *Mask = CI->getArgOperand(2); 1650 Rep = Builder.CreateAnd(Op0, Op1); 1651 llvm::Type *Ty = Op0->getType(); 1652 Value *Zero = llvm::Constant::getNullValue(Ty); 1653 ICmpInst::Predicate Pred = 1654 Name.startswith("avx512.ptestm") ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ; 1655 Rep = Builder.CreateICmp(Pred, Rep, Zero); 1656 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, Mask); 1657 } else if (IsX86 && (Name.startswith("avx512.mask.pbroadcast"))){ 1658 unsigned NumElts = 1659 CI->getArgOperand(1)->getType()->getVectorNumElements(); 1660 Rep = Builder.CreateVectorSplat(NumElts, CI->getArgOperand(0)); 1661 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1662 CI->getArgOperand(1)); 1663 } else if (IsX86 && (Name.startswith("avx512.kunpck"))) { 1664 unsigned NumElts = CI->getType()->getScalarSizeInBits(); 1665 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), NumElts); 1666 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), NumElts); 1667 uint32_t Indices[64]; 1668 for (unsigned i = 0; i != NumElts; ++i) 1669 Indices[i] = i; 1670 1671 // First extract half of each vector. This gives better codegen than 1672 // doing it in a single shuffle. 1673 LHS = Builder.CreateShuffleVector(LHS, LHS, 1674 makeArrayRef(Indices, NumElts / 2)); 1675 RHS = Builder.CreateShuffleVector(RHS, RHS, 1676 makeArrayRef(Indices, NumElts / 2)); 1677 // Concat the vectors. 1678 // NOTE: Operands have to be swapped to match intrinsic definition. 
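      // For example, for kunpck.bw the concatenation below yields a 16-bit
      // mask whose low 8 bits come from the second argument and whose high
      // 8 bits come from the first, which is why RHS is listed first.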
1679 Rep = Builder.CreateShuffleVector(RHS, LHS, 1680 makeArrayRef(Indices, NumElts)); 1681 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1682 } else if (IsX86 && Name == "avx512.kand.w") { 1683 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1684 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1685 Rep = Builder.CreateAnd(LHS, RHS); 1686 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1687 } else if (IsX86 && Name == "avx512.kandn.w") { 1688 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1689 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1690 LHS = Builder.CreateNot(LHS); 1691 Rep = Builder.CreateAnd(LHS, RHS); 1692 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1693 } else if (IsX86 && Name == "avx512.kor.w") { 1694 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1695 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1696 Rep = Builder.CreateOr(LHS, RHS); 1697 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1698 } else if (IsX86 && Name == "avx512.kxor.w") { 1699 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1700 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1701 Rep = Builder.CreateXor(LHS, RHS); 1702 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1703 } else if (IsX86 && Name == "avx512.kxnor.w") { 1704 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1705 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1706 LHS = Builder.CreateNot(LHS); 1707 Rep = Builder.CreateXor(LHS, RHS); 1708 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1709 } else if (IsX86 && Name == "avx512.knot.w") { 1710 Rep = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1711 Rep = Builder.CreateNot(Rep); 1712 Rep = Builder.CreateBitCast(Rep, CI->getType()); 1713 } else if (IsX86 && 1714 (Name == "avx512.kortestz.w" || Name == "avx512.kortestc.w")) { 1715 Value *LHS = getX86MaskVec(Builder, CI->getArgOperand(0), 16); 1716 Value *RHS = getX86MaskVec(Builder, CI->getArgOperand(1), 16); 1717 Rep = Builder.CreateOr(LHS, RHS); 1718 Rep = Builder.CreateBitCast(Rep, Builder.getInt16Ty()); 1719 Value *C; 1720 if (Name[14] == 'c') 1721 C = ConstantInt::getAllOnesValue(Builder.getInt16Ty()); 1722 else 1723 C = ConstantInt::getNullValue(Builder.getInt16Ty()); 1724 Rep = Builder.CreateICmpEQ(Rep, C); 1725 Rep = Builder.CreateZExt(Rep, Builder.getInt32Ty()); 1726 } else if (IsX86 && (Name == "sse.add.ss" || Name == "sse2.add.sd" || 1727 Name == "sse.sub.ss" || Name == "sse2.sub.sd" || 1728 Name == "sse.mul.ss" || Name == "sse2.mul.sd" || 1729 Name == "sse.div.ss" || Name == "sse2.div.sd")) { 1730 Type *I32Ty = Type::getInt32Ty(C); 1731 Value *Elt0 = Builder.CreateExtractElement(CI->getArgOperand(0), 1732 ConstantInt::get(I32Ty, 0)); 1733 Value *Elt1 = Builder.CreateExtractElement(CI->getArgOperand(1), 1734 ConstantInt::get(I32Ty, 0)); 1735 Value *EltOp; 1736 if (Name.contains(".add.")) 1737 EltOp = Builder.CreateFAdd(Elt0, Elt1); 1738 else if (Name.contains(".sub.")) 1739 EltOp = Builder.CreateFSub(Elt0, Elt1); 1740 else if (Name.contains(".mul.")) 1741 EltOp = Builder.CreateFMul(Elt0, Elt1); 1742 else 1743 EltOp = Builder.CreateFDiv(Elt0, Elt1); 1744 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), EltOp, 1745 ConstantInt::get(I32Ty, 0)); 1746 } else if (IsX86 && Name.startswith("avx512.mask.pcmp")) { 1747 // "avx512.mask.pcmpeq." or "avx512.mask.pcmpgt." 1748 bool CmpEq = Name[16] == 'e'; 1749 Rep = upgradeMaskedCompare(Builder, *CI, CmpEq ? 
0 : 6, true); 1750 } else if (IsX86 && Name.startswith("avx512.mask.fpclass.p")) { 1751 Type *OpTy = CI->getArgOperand(0)->getType(); 1752 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1753 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1754 Intrinsic::ID IID; 1755 if (VecWidth == 128 && EltWidth == 32) 1756 IID = Intrinsic::x86_avx512_fpclass_ps_128; 1757 else if (VecWidth == 256 && EltWidth == 32) 1758 IID = Intrinsic::x86_avx512_fpclass_ps_256; 1759 else if (VecWidth == 512 && EltWidth == 32) 1760 IID = Intrinsic::x86_avx512_fpclass_ps_512; 1761 else if (VecWidth == 128 && EltWidth == 64) 1762 IID = Intrinsic::x86_avx512_fpclass_pd_128; 1763 else if (VecWidth == 256 && EltWidth == 64) 1764 IID = Intrinsic::x86_avx512_fpclass_pd_256; 1765 else if (VecWidth == 512 && EltWidth == 64) 1766 IID = Intrinsic::x86_avx512_fpclass_pd_512; 1767 else 1768 llvm_unreachable("Unexpected intrinsic"); 1769 1770 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1771 { CI->getOperand(0), CI->getArgOperand(1) }); 1772 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(2)); 1773 } else if (IsX86 && Name.startswith("avx512.mask.cmp.p")) { 1774 Type *OpTy = CI->getArgOperand(0)->getType(); 1775 unsigned VecWidth = OpTy->getPrimitiveSizeInBits(); 1776 unsigned EltWidth = OpTy->getScalarSizeInBits(); 1777 Intrinsic::ID IID; 1778 if (VecWidth == 128 && EltWidth == 32) 1779 IID = Intrinsic::x86_avx512_cmp_ps_128; 1780 else if (VecWidth == 256 && EltWidth == 32) 1781 IID = Intrinsic::x86_avx512_cmp_ps_256; 1782 else if (VecWidth == 512 && EltWidth == 32) 1783 IID = Intrinsic::x86_avx512_cmp_ps_512; 1784 else if (VecWidth == 128 && EltWidth == 64) 1785 IID = Intrinsic::x86_avx512_cmp_pd_128; 1786 else if (VecWidth == 256 && EltWidth == 64) 1787 IID = Intrinsic::x86_avx512_cmp_pd_256; 1788 else if (VecWidth == 512 && EltWidth == 64) 1789 IID = Intrinsic::x86_avx512_cmp_pd_512; 1790 else 1791 llvm_unreachable("Unexpected intrinsic"); 1792 1793 SmallVector<Value *, 4> Args; 1794 Args.push_back(CI->getArgOperand(0)); 1795 Args.push_back(CI->getArgOperand(1)); 1796 Args.push_back(CI->getArgOperand(2)); 1797 if (CI->getNumArgOperands() == 5) 1798 Args.push_back(CI->getArgOperand(4)); 1799 1800 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 1801 Args); 1802 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, CI->getArgOperand(3)); 1803 } else if (IsX86 && Name.startswith("avx512.mask.cmp.") && 1804 Name[16] != 'p') { 1805 // Integer compare intrinsics. 
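      // The immediate operand uses the VPCMP predicate encoding (0=EQ, 1=LT,
      // 2=LE, 3=FALSE, 4=NE, 5=NLT, 6=NLE, 7=TRUE); the boolean passed to
      // upgradeMaskedCompare distinguishes the signed "cmp" forms from the
      // unsigned "ucmp" forms handled just below.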
1806 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1807 Rep = upgradeMaskedCompare(Builder, *CI, Imm, true); 1808 } else if (IsX86 && Name.startswith("avx512.mask.ucmp.")) { 1809 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 1810 Rep = upgradeMaskedCompare(Builder, *CI, Imm, false); 1811 } else if (IsX86 && (Name.startswith("avx512.cvtb2mask.") || 1812 Name.startswith("avx512.cvtw2mask.") || 1813 Name.startswith("avx512.cvtd2mask.") || 1814 Name.startswith("avx512.cvtq2mask."))) { 1815 Value *Op = CI->getArgOperand(0); 1816 Value *Zero = llvm::Constant::getNullValue(Op->getType()); 1817 Rep = Builder.CreateICmp(ICmpInst::ICMP_SLT, Op, Zero); 1818 Rep = ApplyX86MaskOn1BitsVec(Builder, Rep, nullptr); 1819 } else if(IsX86 && (Name == "ssse3.pabs.b.128" || 1820 Name == "ssse3.pabs.w.128" || 1821 Name == "ssse3.pabs.d.128" || 1822 Name.startswith("avx2.pabs") || 1823 Name.startswith("avx512.mask.pabs"))) { 1824 Rep = upgradeAbs(Builder, *CI); 1825 } else if (IsX86 && (Name == "sse41.pmaxsb" || 1826 Name == "sse2.pmaxs.w" || 1827 Name == "sse41.pmaxsd" || 1828 Name.startswith("avx2.pmaxs") || 1829 Name.startswith("avx512.mask.pmaxs"))) { 1830 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SGT); 1831 } else if (IsX86 && (Name == "sse2.pmaxu.b" || 1832 Name == "sse41.pmaxuw" || 1833 Name == "sse41.pmaxud" || 1834 Name.startswith("avx2.pmaxu") || 1835 Name.startswith("avx512.mask.pmaxu"))) { 1836 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_UGT); 1837 } else if (IsX86 && (Name == "sse41.pminsb" || 1838 Name == "sse2.pmins.w" || 1839 Name == "sse41.pminsd" || 1840 Name.startswith("avx2.pmins") || 1841 Name.startswith("avx512.mask.pmins"))) { 1842 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_SLT); 1843 } else if (IsX86 && (Name == "sse2.pminu.b" || 1844 Name == "sse41.pminuw" || 1845 Name == "sse41.pminud" || 1846 Name.startswith("avx2.pminu") || 1847 Name.startswith("avx512.mask.pminu"))) { 1848 Rep = upgradeIntMinMax(Builder, *CI, ICmpInst::ICMP_ULT); 1849 } else if (IsX86 && (Name == "sse2.pmulu.dq" || 1850 Name == "avx2.pmulu.dq" || 1851 Name == "avx512.pmulu.dq.512" || 1852 Name.startswith("avx512.mask.pmulu.dq."))) { 1853 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/false); 1854 } else if (IsX86 && (Name == "sse41.pmuldq" || 1855 Name == "avx2.pmul.dq" || 1856 Name == "avx512.pmul.dq.512" || 1857 Name.startswith("avx512.mask.pmul.dq."))) { 1858 Rep = upgradePMULDQ(Builder, *CI, /*Signed*/true); 1859 } else if (IsX86 && (Name == "sse.cvtsi2ss" || 1860 Name == "sse2.cvtsi2sd" || 1861 Name == "sse.cvtsi642ss" || 1862 Name == "sse2.cvtsi642sd")) { 1863 Rep = Builder.CreateSIToFP(CI->getArgOperand(1), 1864 CI->getType()->getVectorElementType()); 1865 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1866 } else if (IsX86 && Name == "avx512.cvtusi2sd") { 1867 Rep = Builder.CreateUIToFP(CI->getArgOperand(1), 1868 CI->getType()->getVectorElementType()); 1869 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1870 } else if (IsX86 && Name == "sse2.cvtss2sd") { 1871 Rep = Builder.CreateExtractElement(CI->getArgOperand(1), (uint64_t)0); 1872 Rep = Builder.CreateFPExt(Rep, CI->getType()->getVectorElementType()); 1873 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, (uint64_t)0); 1874 } else if (IsX86 && (Name == "sse2.cvtdq2pd" || 1875 Name == "sse2.cvtdq2ps" || 1876 Name == "avx.cvtdq2.pd.256" || 1877 Name == "avx.cvtdq2.ps.256" || 1878 Name.startswith("avx512.mask.cvtdq2pd.") 
|| 1879 Name.startswith("avx512.mask.cvtudq2pd.") || 1880 Name == "avx512.mask.cvtdq2ps.128" || 1881 Name == "avx512.mask.cvtdq2ps.256" || 1882 Name == "avx512.mask.cvtudq2ps.128" || 1883 Name == "avx512.mask.cvtudq2ps.256" || 1884 Name == "avx512.mask.cvtqq2pd.128" || 1885 Name == "avx512.mask.cvtqq2pd.256" || 1886 Name == "avx512.mask.cvtuqq2pd.128" || 1887 Name == "avx512.mask.cvtuqq2pd.256" || 1888 Name == "sse2.cvtps2pd" || 1889 Name == "avx.cvt.ps2.pd.256" || 1890 Name == "avx512.mask.cvtps2pd.128" || 1891 Name == "avx512.mask.cvtps2pd.256")) { 1892 Type *DstTy = CI->getType(); 1893 Rep = CI->getArgOperand(0); 1894 1895 unsigned NumDstElts = DstTy->getVectorNumElements(); 1896 if (NumDstElts < Rep->getType()->getVectorNumElements()) { 1897 assert(NumDstElts == 2 && "Unexpected vector size"); 1898 uint32_t ShuffleMask[2] = { 0, 1 }; 1899 Rep = Builder.CreateShuffleVector(Rep, Rep, ShuffleMask); 1900 } 1901 1902 bool IsPS2PD = (StringRef::npos != Name.find("ps2")); 1903 bool IsUnsigned = (StringRef::npos != Name.find("cvtu")); 1904 if (IsPS2PD) 1905 Rep = Builder.CreateFPExt(Rep, DstTy, "cvtps2pd"); 1906 else if (IsUnsigned) 1907 Rep = Builder.CreateUIToFP(Rep, DstTy, "cvt"); 1908 else 1909 Rep = Builder.CreateSIToFP(Rep, DstTy, "cvt"); 1910 1911 if (CI->getNumArgOperands() == 3) 1912 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 1913 CI->getArgOperand(1)); 1914 } else if (IsX86 && (Name.startswith("avx512.mask.loadu."))) { 1915 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 1916 CI->getArgOperand(1), CI->getArgOperand(2), 1917 /*Aligned*/false); 1918 } else if (IsX86 && (Name.startswith("avx512.mask.load."))) { 1919 Rep = UpgradeMaskedLoad(Builder, CI->getArgOperand(0), 1920 CI->getArgOperand(1),CI->getArgOperand(2), 1921 /*Aligned*/true); 1922 } else if (IsX86 && Name.startswith("avx512.mask.expand.load.")) { 1923 Type *ResultTy = CI->getType(); 1924 Type *PtrTy = ResultTy->getVectorElementType(); 1925 1926 // Cast the pointer to element type. 1927 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 1928 llvm::PointerType::getUnqual(PtrTy)); 1929 1930 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 1931 ResultTy->getVectorNumElements()); 1932 1933 Function *ELd = Intrinsic::getDeclaration(F->getParent(), 1934 Intrinsic::masked_expandload, 1935 ResultTy); 1936 Rep = Builder.CreateCall(ELd, { Ptr, MaskVec, CI->getOperand(1) }); 1937 } else if (IsX86 && Name.startswith("avx512.mask.compress.store.")) { 1938 Type *ResultTy = CI->getArgOperand(1)->getType(); 1939 Type *PtrTy = ResultTy->getVectorElementType(); 1940 1941 // Cast the pointer to element type. 
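      // llvm.masked.compressstore expects a pointer to the element type rather
      // than to the whole vector, plus an i1 mask vector, so both are rebuilt
      // here before emitting the call.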
1942 Value *Ptr = Builder.CreateBitCast(CI->getOperand(0), 1943 llvm::PointerType::getUnqual(PtrTy)); 1944 1945 Value *MaskVec = getX86MaskVec(Builder, CI->getArgOperand(2), 1946 ResultTy->getVectorNumElements()); 1947 1948 Function *CSt = Intrinsic::getDeclaration(F->getParent(), 1949 Intrinsic::masked_compressstore, 1950 ResultTy); 1951 Rep = Builder.CreateCall(CSt, { CI->getArgOperand(1), Ptr, MaskVec }); 1952 } else if (IsX86 && Name.startswith("xop.vpcom")) { 1953 Intrinsic::ID intID; 1954 if (Name.endswith("ub")) 1955 intID = Intrinsic::x86_xop_vpcomub; 1956 else if (Name.endswith("uw")) 1957 intID = Intrinsic::x86_xop_vpcomuw; 1958 else if (Name.endswith("ud")) 1959 intID = Intrinsic::x86_xop_vpcomud; 1960 else if (Name.endswith("uq")) 1961 intID = Intrinsic::x86_xop_vpcomuq; 1962 else if (Name.endswith("b")) 1963 intID = Intrinsic::x86_xop_vpcomb; 1964 else if (Name.endswith("w")) 1965 intID = Intrinsic::x86_xop_vpcomw; 1966 else if (Name.endswith("d")) 1967 intID = Intrinsic::x86_xop_vpcomd; 1968 else if (Name.endswith("q")) 1969 intID = Intrinsic::x86_xop_vpcomq; 1970 else 1971 llvm_unreachable("Unknown suffix"); 1972 1973 Name = Name.substr(9); // strip off "xop.vpcom" 1974 unsigned Imm; 1975 if (Name.startswith("lt")) 1976 Imm = 0; 1977 else if (Name.startswith("le")) 1978 Imm = 1; 1979 else if (Name.startswith("gt")) 1980 Imm = 2; 1981 else if (Name.startswith("ge")) 1982 Imm = 3; 1983 else if (Name.startswith("eq")) 1984 Imm = 4; 1985 else if (Name.startswith("ne")) 1986 Imm = 5; 1987 else if (Name.startswith("false")) 1988 Imm = 6; 1989 else if (Name.startswith("true")) 1990 Imm = 7; 1991 else 1992 llvm_unreachable("Unknown condition"); 1993 1994 Function *VPCOM = Intrinsic::getDeclaration(F->getParent(), intID); 1995 Rep = 1996 Builder.CreateCall(VPCOM, {CI->getArgOperand(0), CI->getArgOperand(1), 1997 Builder.getInt8(Imm)}); 1998 } else if (IsX86 && Name.startswith("xop.vpcmov")) { 1999 Value *Sel = CI->getArgOperand(2); 2000 Value *NotSel = Builder.CreateNot(Sel); 2001 Value *Sel0 = Builder.CreateAnd(CI->getArgOperand(0), Sel); 2002 Value *Sel1 = Builder.CreateAnd(CI->getArgOperand(1), NotSel); 2003 Rep = Builder.CreateOr(Sel0, Sel1); 2004 } else if (IsX86 && Name == "sse42.crc32.64.8") { 2005 Function *CRC32 = Intrinsic::getDeclaration(F->getParent(), 2006 Intrinsic::x86_sse42_crc32_32_8); 2007 Value *Trunc0 = Builder.CreateTrunc(CI->getArgOperand(0), Type::getInt32Ty(C)); 2008 Rep = Builder.CreateCall(CRC32, {Trunc0, CI->getArgOperand(1)}); 2009 Rep = Builder.CreateZExt(Rep, CI->getType(), ""); 2010 } else if (IsX86 && (Name.startswith("avx.vbroadcast.s") || 2011 Name.startswith("avx512.vbroadcast.s"))) { 2012 // Replace broadcasts with a series of insertelements. 
2013 Type *VecTy = CI->getType(); 2014 Type *EltTy = VecTy->getVectorElementType(); 2015 unsigned EltNum = VecTy->getVectorNumElements(); 2016 Value *Cast = Builder.CreateBitCast(CI->getArgOperand(0), 2017 EltTy->getPointerTo()); 2018 Value *Load = Builder.CreateLoad(EltTy, Cast); 2019 Type *I32Ty = Type::getInt32Ty(C); 2020 Rep = UndefValue::get(VecTy); 2021 for (unsigned I = 0; I < EltNum; ++I) 2022 Rep = Builder.CreateInsertElement(Rep, Load, 2023 ConstantInt::get(I32Ty, I)); 2024 } else if (IsX86 && (Name.startswith("sse41.pmovsx") || 2025 Name.startswith("sse41.pmovzx") || 2026 Name.startswith("avx2.pmovsx") || 2027 Name.startswith("avx2.pmovzx") || 2028 Name.startswith("avx512.mask.pmovsx") || 2029 Name.startswith("avx512.mask.pmovzx"))) { 2030 VectorType *SrcTy = cast<VectorType>(CI->getArgOperand(0)->getType()); 2031 VectorType *DstTy = cast<VectorType>(CI->getType()); 2032 unsigned NumDstElts = DstTy->getNumElements(); 2033 2034 // Extract a subvector of the first NumDstElts lanes and sign/zero extend. 2035 SmallVector<uint32_t, 8> ShuffleMask(NumDstElts); 2036 for (unsigned i = 0; i != NumDstElts; ++i) 2037 ShuffleMask[i] = i; 2038 2039 Value *SV = Builder.CreateShuffleVector( 2040 CI->getArgOperand(0), UndefValue::get(SrcTy), ShuffleMask); 2041 2042 bool DoSext = (StringRef::npos != Name.find("pmovsx")); 2043 Rep = DoSext ? Builder.CreateSExt(SV, DstTy) 2044 : Builder.CreateZExt(SV, DstTy); 2045 // If there are 3 arguments, it's a masked intrinsic so we need a select. 2046 if (CI->getNumArgOperands() == 3) 2047 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2048 CI->getArgOperand(1)); 2049 } else if (IsX86 && (Name.startswith("avx.vbroadcastf128") || 2050 Name == "avx2.vbroadcasti128")) { 2051 // Replace vbroadcastf128/vbroadcasti128 with a vector load+shuffle. 2052 Type *EltTy = CI->getType()->getVectorElementType(); 2053 unsigned NumSrcElts = 128 / EltTy->getPrimitiveSizeInBits(); 2054 Type *VT = VectorType::get(EltTy, NumSrcElts); 2055 Value *Op = Builder.CreatePointerCast(CI->getArgOperand(0), 2056 PointerType::getUnqual(VT)); 2057 Value *Load = Builder.CreateAlignedLoad(Op, 1); 2058 if (NumSrcElts == 2) 2059 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2060 { 0, 1, 0, 1 }); 2061 else 2062 Rep = Builder.CreateShuffleVector(Load, UndefValue::get(Load->getType()), 2063 { 0, 1, 2, 3, 0, 1, 2, 3 }); 2064 } else if (IsX86 && (Name.startswith("avx512.mask.shuf.i") || 2065 Name.startswith("avx512.mask.shuf.f"))) { 2066 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2067 Type *VT = CI->getType(); 2068 unsigned NumLanes = VT->getPrimitiveSizeInBits() / 128; 2069 unsigned NumElementsInLane = 128 / VT->getScalarSizeInBits(); 2070 unsigned ControlBitsMask = NumLanes - 1; 2071 unsigned NumControlBits = NumLanes / 2; 2072 SmallVector<uint32_t, 8> ShuffleMask(0); 2073 2074 for (unsigned l = 0; l != NumLanes; ++l) { 2075 unsigned LaneMask = (Imm >> (l * NumControlBits)) & ControlBitsMask; 2076 // We actually need the other source. 
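        // Destination lanes in the upper half select from the second source
        // operand, so bias the lane index past the first operand's lanes in
        // the concatenated shuffle inputs.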
        if (l >= NumLanes / 2)
          LaneMask += NumLanes;
        for (unsigned i = 0; i != NumElementsInLane; ++i)
          ShuffleMask.push_back(LaneMask * NumElementsInLane + i);
      }
      Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
                                        CI->getArgOperand(1), ShuffleMask);
      Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep,
                          CI->getArgOperand(3));
    } else if (IsX86 && (Name.startswith("avx512.mask.broadcastf") ||
                         Name.startswith("avx512.mask.broadcasti"))) {
      unsigned NumSrcElts =
          CI->getArgOperand(0)->getType()->getVectorNumElements();
      unsigned NumDstElts = CI->getType()->getVectorNumElements();

      SmallVector<uint32_t, 8> ShuffleMask(NumDstElts);
      for (unsigned i = 0; i != NumDstElts; ++i)
        ShuffleMask[i] = i % NumSrcElts;

      Rep = Builder.CreateShuffleVector(CI->getArgOperand(0),
                                        CI->getArgOperand(0),
                                        ShuffleMask);
      Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                          CI->getArgOperand(1));
    } else if (IsX86 && (Name.startswith("avx2.pbroadcast") ||
                         Name.startswith("avx2.vbroadcast") ||
                         Name.startswith("avx512.pbroadcast") ||
                         Name.startswith("avx512.mask.broadcast.s"))) {
      // Replace vp?broadcasts with a vector shuffle.
      Value *Op = CI->getArgOperand(0);
      unsigned NumElts = CI->getType()->getVectorNumElements();
      Type *MaskTy = VectorType::get(Type::getInt32Ty(C), NumElts);
      Rep = Builder.CreateShuffleVector(Op, UndefValue::get(Op->getType()),
                                        Constant::getNullValue(MaskTy));

      if (CI->getNumArgOperands() == 3)
        Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep,
                            CI->getArgOperand(1));
    } else if (IsX86 && (Name.startswith("sse2.paddus.") ||
                         Name.startswith("sse2.psubus.") ||
                         Name.startswith("avx2.paddus.") ||
                         Name.startswith("avx2.psubus.") ||
                         Name.startswith("avx512.mask.paddus.") ||
                         Name.startswith("avx512.mask.psubus."))) {
      bool IsAdd = Name.contains(".paddus");
      Rep = UpgradeX86AddSubSatIntrinsics(Builder, *CI, IsAdd);
    } else if (IsX86 && Name.startswith("avx512.mask.palignr.")) {
      Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
                                      CI->getArgOperand(1),
                                      CI->getArgOperand(2),
                                      CI->getArgOperand(3),
                                      CI->getArgOperand(4),
                                      false);
    } else if (IsX86 && Name.startswith("avx512.mask.valign.")) {
      Rep = UpgradeX86ALIGNIntrinsics(Builder, CI->getArgOperand(0),
                                      CI->getArgOperand(1),
                                      CI->getArgOperand(2),
                                      CI->getArgOperand(3),
                                      CI->getArgOperand(4),
                                      true);
    } else if (IsX86 && (Name == "sse2.psll.dq" ||
                         Name == "avx2.psll.dq")) {
      // 128/256-bit shift left specified in bits.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0),
                                       Shift / 8); // Shift is in bits.
    } else if (IsX86 && (Name == "sse2.psrl.dq" ||
                         Name == "avx2.psrl.dq")) {
      // 128/256-bit shift right specified in bits.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0),
                                       Shift / 8); // Shift is in bits.
    } else if (IsX86 && (Name == "sse2.psll.dq.bs" ||
                         Name == "avx2.psll.dq.bs" ||
                         Name == "avx512.psll.dq.512")) {
      // 128/256/512-bit shift left specified in bytes.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSLLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
    } else if (IsX86 && (Name == "sse2.psrl.dq.bs" ||
                         Name == "avx2.psrl.dq.bs" ||
                         Name == "avx512.psrl.dq.512")) {
      // 128/256/512-bit shift right specified in bytes.
      unsigned Shift = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue();
      Rep = UpgradeX86PSRLDQIntrinsics(Builder, CI->getArgOperand(0), Shift);
    } else if (IsX86 && (Name == "sse41.pblendw" ||
                         Name.startswith("sse41.blendp") ||
                         Name.startswith("avx.blend.p") ||
                         Name == "avx2.pblendw" ||
                         Name.startswith("avx2.pblendd."))) {
      Value *Op0 = CI->getArgOperand(0);
      Value *Op1 = CI->getArgOperand(1);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
      VectorType *VecTy = cast<VectorType>(CI->getType());
      unsigned NumElts = VecTy->getNumElements();

      SmallVector<uint32_t, 16> Idxs(NumElts);
      for (unsigned i = 0; i != NumElts; ++i)
        Idxs[i] = ((Imm >> (i%8)) & 1) ? i + NumElts : i;

      Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs);
    } else if (IsX86 && (Name.startswith("avx.vinsertf128.") ||
                         Name == "avx2.vinserti128" ||
                         Name.startswith("avx512.mask.insert"))) {
      Value *Op0 = CI->getArgOperand(0);
      Value *Op1 = CI->getArgOperand(1);
      unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
      unsigned DstNumElts = CI->getType()->getVectorNumElements();
      unsigned SrcNumElts = Op1->getType()->getVectorNumElements();
      unsigned Scale = DstNumElts / SrcNumElts;

      // Mask off the high bits of the immediate value; hardware ignores those.
      Imm = Imm % Scale;

      // Extend the second operand into a vector the size of the destination.
      Value *UndefV = UndefValue::get(Op1->getType());
      SmallVector<uint32_t, 8> Idxs(DstNumElts);
      for (unsigned i = 0; i != SrcNumElts; ++i)
        Idxs[i] = i;
      for (unsigned i = SrcNumElts; i != DstNumElts; ++i)
        Idxs[i] = SrcNumElts;
      Rep = Builder.CreateShuffleVector(Op1, UndefV, Idxs);

      // Insert the second operand into the first operand.

      // Note that there is no guarantee that instruction lowering will actually
      // produce a vinsertf128 instruction for the created shuffles. In
      // particular, the 0 immediate case involves no lane changes, so it can
      // be handled as a blend.

      // Example of shuffle mask for 32-bit elements:
      // Imm = 1  <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
      // Imm = 0  <i32 8, i32 9, i32 10, i32 11, i32 4, i32 5, i32 6, i32 7 >

      // First fill with identity mask.
      for (unsigned i = 0; i != DstNumElts; ++i)
        Idxs[i] = i;
      // Then replace the elements where we need to insert.
      for (unsigned i = 0; i != SrcNumElts; ++i)
        Idxs[i + Imm * SrcNumElts] = i + DstNumElts;
      Rep = Builder.CreateShuffleVector(Op0, Rep, Idxs);

      // If the intrinsic has a mask operand, handle that.
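      // Only the AVX-512 masked forms carry the extra passthru and mask
      // operands; the plain AVX/AVX2 insert intrinsics have exactly three
      // operands and skip the select entirely.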
2219 if (CI->getNumArgOperands() == 5) 2220 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2221 CI->getArgOperand(3)); 2222 } else if (IsX86 && (Name.startswith("avx.vextractf128.") || 2223 Name == "avx2.vextracti128" || 2224 Name.startswith("avx512.mask.vextract"))) { 2225 Value *Op0 = CI->getArgOperand(0); 2226 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2227 unsigned DstNumElts = CI->getType()->getVectorNumElements(); 2228 unsigned SrcNumElts = Op0->getType()->getVectorNumElements(); 2229 unsigned Scale = SrcNumElts / DstNumElts; 2230 2231 // Mask off the high bits of the immediate value; hardware ignores those. 2232 Imm = Imm % Scale; 2233 2234 // Get indexes for the subvector of the input vector. 2235 SmallVector<uint32_t, 8> Idxs(DstNumElts); 2236 for (unsigned i = 0; i != DstNumElts; ++i) { 2237 Idxs[i] = i + (Imm * DstNumElts); 2238 } 2239 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2240 2241 // If the intrinsic has a mask operand, handle that. 2242 if (CI->getNumArgOperands() == 4) 2243 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2244 CI->getArgOperand(2)); 2245 } else if (!IsX86 && Name == "stackprotectorcheck") { 2246 Rep = nullptr; 2247 } else if (IsX86 && (Name.startswith("avx512.mask.perm.df.") || 2248 Name.startswith("avx512.mask.perm.di."))) { 2249 Value *Op0 = CI->getArgOperand(0); 2250 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2251 VectorType *VecTy = cast<VectorType>(CI->getType()); 2252 unsigned NumElts = VecTy->getNumElements(); 2253 2254 SmallVector<uint32_t, 8> Idxs(NumElts); 2255 for (unsigned i = 0; i != NumElts; ++i) 2256 Idxs[i] = (i & ~0x3) + ((Imm >> (2 * (i & 0x3))) & 3); 2257 2258 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2259 2260 if (CI->getNumArgOperands() == 4) 2261 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2262 CI->getArgOperand(2)); 2263 } else if (IsX86 && (Name.startswith("avx.vperm2f128.") || 2264 Name == "avx2.vperm2i128")) { 2265 // The immediate permute control byte looks like this: 2266 // [1:0] - select 128 bits from sources for low half of destination 2267 // [2] - ignore 2268 // [3] - zero low half of destination 2269 // [5:4] - select 128 bits from sources for high half of destination 2270 // [6] - ignore 2271 // [7] - zero high half of destination 2272 2273 uint8_t Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2274 2275 unsigned NumElts = CI->getType()->getVectorNumElements(); 2276 unsigned HalfSize = NumElts / 2; 2277 SmallVector<uint32_t, 8> ShuffleMask(NumElts); 2278 2279 // Determine which operand(s) are actually in use for this instruction. 2280 Value *V0 = (Imm & 0x02) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2281 Value *V1 = (Imm & 0x20) ? CI->getArgOperand(1) : CI->getArgOperand(0); 2282 2283 // If needed, replace operands based on zero mask. 2284 V0 = (Imm & 0x08) ? ConstantAggregateZero::get(CI->getType()) : V0; 2285 V1 = (Imm & 0x80) ? ConstantAggregateZero::get(CI->getType()) : V1; 2286 2287 // Permute low half of result. 2288 unsigned StartIndex = (Imm & 0x01) ? HalfSize : 0; 2289 for (unsigned i = 0; i < HalfSize; ++i) 2290 ShuffleMask[i] = StartIndex + i; 2291 2292 // Permute high half of result. 2293 StartIndex = (Imm & 0x10) ? 
HalfSize : 0; 2294 for (unsigned i = 0; i < HalfSize; ++i) 2295 ShuffleMask[i + HalfSize] = NumElts + StartIndex + i; 2296 2297 Rep = Builder.CreateShuffleVector(V0, V1, ShuffleMask); 2298 2299 } else if (IsX86 && (Name.startswith("avx.vpermil.") || 2300 Name == "sse2.pshuf.d" || 2301 Name.startswith("avx512.mask.vpermil.p") || 2302 Name.startswith("avx512.mask.pshuf.d."))) { 2303 Value *Op0 = CI->getArgOperand(0); 2304 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2305 VectorType *VecTy = cast<VectorType>(CI->getType()); 2306 unsigned NumElts = VecTy->getNumElements(); 2307 // Calculate the size of each index in the immediate. 2308 unsigned IdxSize = 64 / VecTy->getScalarSizeInBits(); 2309 unsigned IdxMask = ((1 << IdxSize) - 1); 2310 2311 SmallVector<uint32_t, 8> Idxs(NumElts); 2312 // Lookup the bits for this element, wrapping around the immediate every 2313 // 8-bits. Elements are grouped into sets of 2 or 4 elements so we need 2314 // to offset by the first index of each group. 2315 for (unsigned i = 0; i != NumElts; ++i) 2316 Idxs[i] = ((Imm >> ((i * IdxSize) % 8)) & IdxMask) | (i & ~IdxMask); 2317 2318 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2319 2320 if (CI->getNumArgOperands() == 4) 2321 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2322 CI->getArgOperand(2)); 2323 } else if (IsX86 && (Name == "sse2.pshufl.w" || 2324 Name.startswith("avx512.mask.pshufl.w."))) { 2325 Value *Op0 = CI->getArgOperand(0); 2326 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2327 unsigned NumElts = CI->getType()->getVectorNumElements(); 2328 2329 SmallVector<uint32_t, 16> Idxs(NumElts); 2330 for (unsigned l = 0; l != NumElts; l += 8) { 2331 for (unsigned i = 0; i != 4; ++i) 2332 Idxs[i + l] = ((Imm >> (2 * i)) & 0x3) + l; 2333 for (unsigned i = 4; i != 8; ++i) 2334 Idxs[i + l] = i + l; 2335 } 2336 2337 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2338 2339 if (CI->getNumArgOperands() == 4) 2340 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2341 CI->getArgOperand(2)); 2342 } else if (IsX86 && (Name == "sse2.pshufh.w" || 2343 Name.startswith("avx512.mask.pshufh.w."))) { 2344 Value *Op0 = CI->getArgOperand(0); 2345 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue(); 2346 unsigned NumElts = CI->getType()->getVectorNumElements(); 2347 2348 SmallVector<uint32_t, 16> Idxs(NumElts); 2349 for (unsigned l = 0; l != NumElts; l += 8) { 2350 for (unsigned i = 0; i != 4; ++i) 2351 Idxs[i + l] = i + l; 2352 for (unsigned i = 0; i != 4; ++i) 2353 Idxs[i + l + 4] = ((Imm >> (2 * i)) & 0x3) + 4 + l; 2354 } 2355 2356 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2357 2358 if (CI->getNumArgOperands() == 4) 2359 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2360 CI->getArgOperand(2)); 2361 } else if (IsX86 && Name.startswith("avx512.mask.shuf.p")) { 2362 Value *Op0 = CI->getArgOperand(0); 2363 Value *Op1 = CI->getArgOperand(1); 2364 unsigned Imm = cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue(); 2365 unsigned NumElts = CI->getType()->getVectorNumElements(); 2366 2367 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2368 unsigned HalfLaneElts = NumLaneElts / 2; 2369 2370 SmallVector<uint32_t, 16> Idxs(NumElts); 2371 for (unsigned i = 0; i != NumElts; ++i) { 2372 // Base index is the starting element of the lane. 2373 Idxs[i] = i - (i % NumLaneElts); 2374 // If we are half way through the lane switch to the other source. 
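        // For example, shufps-style 32-bit lanes take their low two result
        // elements from Op0 and their high two from Op1, with each element
        // chosen by a two-bit field of the immediate.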
2375 if ((i % NumLaneElts) >= HalfLaneElts) 2376 Idxs[i] += NumElts; 2377 // Now select the specific element. By adding HalfLaneElts bits from 2378 // the immediate. Wrapping around the immediate every 8-bits. 2379 Idxs[i] += (Imm >> ((i * HalfLaneElts) % 8)) & ((1 << HalfLaneElts) - 1); 2380 } 2381 2382 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2383 2384 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, 2385 CI->getArgOperand(3)); 2386 } else if (IsX86 && (Name.startswith("avx512.mask.movddup") || 2387 Name.startswith("avx512.mask.movshdup") || 2388 Name.startswith("avx512.mask.movsldup"))) { 2389 Value *Op0 = CI->getArgOperand(0); 2390 unsigned NumElts = CI->getType()->getVectorNumElements(); 2391 unsigned NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2392 2393 unsigned Offset = 0; 2394 if (Name.startswith("avx512.mask.movshdup.")) 2395 Offset = 1; 2396 2397 SmallVector<uint32_t, 16> Idxs(NumElts); 2398 for (unsigned l = 0; l != NumElts; l += NumLaneElts) 2399 for (unsigned i = 0; i != NumLaneElts; i += 2) { 2400 Idxs[i + l + 0] = i + l + Offset; 2401 Idxs[i + l + 1] = i + l + Offset; 2402 } 2403 2404 Rep = Builder.CreateShuffleVector(Op0, Op0, Idxs); 2405 2406 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2407 CI->getArgOperand(1)); 2408 } else if (IsX86 && (Name.startswith("avx512.mask.punpckl") || 2409 Name.startswith("avx512.mask.unpckl."))) { 2410 Value *Op0 = CI->getArgOperand(0); 2411 Value *Op1 = CI->getArgOperand(1); 2412 int NumElts = CI->getType()->getVectorNumElements(); 2413 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2414 2415 SmallVector<uint32_t, 64> Idxs(NumElts); 2416 for (int l = 0; l != NumElts; l += NumLaneElts) 2417 for (int i = 0; i != NumLaneElts; ++i) 2418 Idxs[i + l] = l + (i / 2) + NumElts * (i % 2); 2419 2420 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2421 2422 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2423 CI->getArgOperand(2)); 2424 } else if (IsX86 && (Name.startswith("avx512.mask.punpckh") || 2425 Name.startswith("avx512.mask.unpckh."))) { 2426 Value *Op0 = CI->getArgOperand(0); 2427 Value *Op1 = CI->getArgOperand(1); 2428 int NumElts = CI->getType()->getVectorNumElements(); 2429 int NumLaneElts = 128/CI->getType()->getScalarSizeInBits(); 2430 2431 SmallVector<uint32_t, 64> Idxs(NumElts); 2432 for (int l = 0; l != NumElts; l += NumLaneElts) 2433 for (int i = 0; i != NumLaneElts; ++i) 2434 Idxs[i + l] = (NumLaneElts / 2) + l + (i / 2) + NumElts * (i % 2); 2435 2436 Rep = Builder.CreateShuffleVector(Op0, Op1, Idxs); 2437 2438 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2439 CI->getArgOperand(2)); 2440 } else if (IsX86 && Name.startswith("avx512.mask.pand.")) { 2441 Rep = Builder.CreateAnd(CI->getArgOperand(0), CI->getArgOperand(1)); 2442 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2443 CI->getArgOperand(2)); 2444 } else if (IsX86 && Name.startswith("avx512.mask.pandn.")) { 2445 Rep = Builder.CreateAnd(Builder.CreateNot(CI->getArgOperand(0)), 2446 CI->getArgOperand(1)); 2447 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2448 CI->getArgOperand(2)); 2449 } else if (IsX86 && Name.startswith("avx512.mask.por.")) { 2450 Rep = Builder.CreateOr(CI->getArgOperand(0), CI->getArgOperand(1)); 2451 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2452 CI->getArgOperand(2)); 2453 } else if (IsX86 && Name.startswith("avx512.mask.pxor.")) { 2454 Rep = Builder.CreateXor(CI->getArgOperand(0), CI->getArgOperand(1)); 2455 Rep = EmitX86Select(Builder, 
CI->getArgOperand(3), Rep, 2456 CI->getArgOperand(2)); 2457 } else if (IsX86 && Name.startswith("avx512.mask.and.")) { 2458 VectorType *FTy = cast<VectorType>(CI->getType()); 2459 VectorType *ITy = VectorType::getInteger(FTy); 2460 Rep = Builder.CreateAnd(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2461 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2462 Rep = Builder.CreateBitCast(Rep, FTy); 2463 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2464 CI->getArgOperand(2)); 2465 } else if (IsX86 && Name.startswith("avx512.mask.andn.")) { 2466 VectorType *FTy = cast<VectorType>(CI->getType()); 2467 VectorType *ITy = VectorType::getInteger(FTy); 2468 Rep = Builder.CreateNot(Builder.CreateBitCast(CI->getArgOperand(0), ITy)); 2469 Rep = Builder.CreateAnd(Rep, 2470 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2471 Rep = Builder.CreateBitCast(Rep, FTy); 2472 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2473 CI->getArgOperand(2)); 2474 } else if (IsX86 && Name.startswith("avx512.mask.or.")) { 2475 VectorType *FTy = cast<VectorType>(CI->getType()); 2476 VectorType *ITy = VectorType::getInteger(FTy); 2477 Rep = Builder.CreateOr(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2478 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2479 Rep = Builder.CreateBitCast(Rep, FTy); 2480 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2481 CI->getArgOperand(2)); 2482 } else if (IsX86 && Name.startswith("avx512.mask.xor.")) { 2483 VectorType *FTy = cast<VectorType>(CI->getType()); 2484 VectorType *ITy = VectorType::getInteger(FTy); 2485 Rep = Builder.CreateXor(Builder.CreateBitCast(CI->getArgOperand(0), ITy), 2486 Builder.CreateBitCast(CI->getArgOperand(1), ITy)); 2487 Rep = Builder.CreateBitCast(Rep, FTy); 2488 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2489 CI->getArgOperand(2)); 2490 } else if (IsX86 && Name.startswith("avx512.mask.padd.")) { 2491 Rep = Builder.CreateAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2492 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2493 CI->getArgOperand(2)); 2494 } else if (IsX86 && Name.startswith("avx512.mask.psub.")) { 2495 Rep = Builder.CreateSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2496 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2497 CI->getArgOperand(2)); 2498 } else if (IsX86 && Name.startswith("avx512.mask.pmull.")) { 2499 Rep = Builder.CreateMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2500 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2501 CI->getArgOperand(2)); 2502 } else if (IsX86 && Name.startswith("avx512.mask.add.p")) { 2503 if (Name.endswith(".512")) { 2504 Intrinsic::ID IID; 2505 if (Name[17] == 's') 2506 IID = Intrinsic::x86_avx512_add_ps_512; 2507 else 2508 IID = Intrinsic::x86_avx512_add_pd_512; 2509 2510 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2511 { CI->getArgOperand(0), CI->getArgOperand(1), 2512 CI->getArgOperand(4) }); 2513 } else { 2514 Rep = Builder.CreateFAdd(CI->getArgOperand(0), CI->getArgOperand(1)); 2515 } 2516 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2517 CI->getArgOperand(2)); 2518 } else if (IsX86 && Name.startswith("avx512.mask.div.p")) { 2519 if (Name.endswith(".512")) { 2520 Intrinsic::ID IID; 2521 if (Name[17] == 's') 2522 IID = Intrinsic::x86_avx512_div_ps_512; 2523 else 2524 IID = Intrinsic::x86_avx512_div_pd_512; 2525 2526 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2527 { CI->getArgOperand(0), CI->getArgOperand(1), 2528 CI->getArgOperand(4) }); 2529 } 
else { 2530 Rep = Builder.CreateFDiv(CI->getArgOperand(0), CI->getArgOperand(1)); 2531 } 2532 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2533 CI->getArgOperand(2)); 2534 } else if (IsX86 && Name.startswith("avx512.mask.mul.p")) { 2535 if (Name.endswith(".512")) { 2536 Intrinsic::ID IID; 2537 if (Name[17] == 's') 2538 IID = Intrinsic::x86_avx512_mul_ps_512; 2539 else 2540 IID = Intrinsic::x86_avx512_mul_pd_512; 2541 2542 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2543 { CI->getArgOperand(0), CI->getArgOperand(1), 2544 CI->getArgOperand(4) }); 2545 } else { 2546 Rep = Builder.CreateFMul(CI->getArgOperand(0), CI->getArgOperand(1)); 2547 } 2548 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2549 CI->getArgOperand(2)); 2550 } else if (IsX86 && Name.startswith("avx512.mask.sub.p")) { 2551 if (Name.endswith(".512")) { 2552 Intrinsic::ID IID; 2553 if (Name[17] == 's') 2554 IID = Intrinsic::x86_avx512_sub_ps_512; 2555 else 2556 IID = Intrinsic::x86_avx512_sub_pd_512; 2557 2558 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2559 { CI->getArgOperand(0), CI->getArgOperand(1), 2560 CI->getArgOperand(4) }); 2561 } else { 2562 Rep = Builder.CreateFSub(CI->getArgOperand(0), CI->getArgOperand(1)); 2563 } 2564 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2565 CI->getArgOperand(2)); 2566 } else if (IsX86 && Name.startswith("avx512.mask.max.p") && 2567 Name.drop_front(18) == ".512") { 2568 Intrinsic::ID IID; 2569 if (Name[17] == 's') 2570 IID = Intrinsic::x86_avx512_max_ps_512; 2571 else 2572 IID = Intrinsic::x86_avx512_max_pd_512; 2573 2574 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2575 { CI->getArgOperand(0), CI->getArgOperand(1), 2576 CI->getArgOperand(4) }); 2577 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2578 CI->getArgOperand(2)); 2579 } else if (IsX86 && Name.startswith("avx512.mask.min.p") && 2580 Name.drop_front(18) == ".512") { 2581 Intrinsic::ID IID; 2582 if (Name[17] == 's') 2583 IID = Intrinsic::x86_avx512_min_ps_512; 2584 else 2585 IID = Intrinsic::x86_avx512_min_pd_512; 2586 2587 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2588 { CI->getArgOperand(0), CI->getArgOperand(1), 2589 CI->getArgOperand(4) }); 2590 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2591 CI->getArgOperand(2)); 2592 } else if (IsX86 && Name.startswith("avx512.mask.lzcnt.")) { 2593 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), 2594 Intrinsic::ctlz, 2595 CI->getType()), 2596 { CI->getArgOperand(0), Builder.getInt1(false) }); 2597 Rep = EmitX86Select(Builder, CI->getArgOperand(2), Rep, 2598 CI->getArgOperand(1)); 2599 } else if (IsX86 && Name.startswith("avx512.mask.psll")) { 2600 bool IsImmediate = Name[16] == 'i' || 2601 (Name.size() > 18 && Name[18] == 'i'); 2602 bool IsVariable = Name[16] == 'v'; 2603 char Size = Name[16] == '.' ? Name[17] : 2604 Name[17] == '.' ? Name[18] : 2605 Name[18] == '.' ? 
Name[19] : 2606 Name[20]; 2607 2608 Intrinsic::ID IID; 2609 if (IsVariable && Name[17] != '.') { 2610 if (Size == 'd' && Name[17] == '2') // avx512.mask.psllv2.di 2611 IID = Intrinsic::x86_avx2_psllv_q; 2612 else if (Size == 'd' && Name[17] == '4') // avx512.mask.psllv4.di 2613 IID = Intrinsic::x86_avx2_psllv_q_256; 2614 else if (Size == 's' && Name[17] == '4') // avx512.mask.psllv4.si 2615 IID = Intrinsic::x86_avx2_psllv_d; 2616 else if (Size == 's' && Name[17] == '8') // avx512.mask.psllv8.si 2617 IID = Intrinsic::x86_avx2_psllv_d_256; 2618 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psllv8.hi 2619 IID = Intrinsic::x86_avx512_psllv_w_128; 2620 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psllv16.hi 2621 IID = Intrinsic::x86_avx512_psllv_w_256; 2622 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psllv32hi 2623 IID = Intrinsic::x86_avx512_psllv_w_512; 2624 else 2625 llvm_unreachable("Unexpected size"); 2626 } else if (Name.endswith(".128")) { 2627 if (Size == 'd') // avx512.mask.psll.d.128, avx512.mask.psll.di.128 2628 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_d 2629 : Intrinsic::x86_sse2_psll_d; 2630 else if (Size == 'q') // avx512.mask.psll.q.128, avx512.mask.psll.qi.128 2631 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_q 2632 : Intrinsic::x86_sse2_psll_q; 2633 else if (Size == 'w') // avx512.mask.psll.w.128, avx512.mask.psll.wi.128 2634 IID = IsImmediate ? Intrinsic::x86_sse2_pslli_w 2635 : Intrinsic::x86_sse2_psll_w; 2636 else 2637 llvm_unreachable("Unexpected size"); 2638 } else if (Name.endswith(".256")) { 2639 if (Size == 'd') // avx512.mask.psll.d.256, avx512.mask.psll.di.256 2640 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_d 2641 : Intrinsic::x86_avx2_psll_d; 2642 else if (Size == 'q') // avx512.mask.psll.q.256, avx512.mask.psll.qi.256 2643 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_q 2644 : Intrinsic::x86_avx2_psll_q; 2645 else if (Size == 'w') // avx512.mask.psll.w.256, avx512.mask.psll.wi.256 2646 IID = IsImmediate ? Intrinsic::x86_avx2_pslli_w 2647 : Intrinsic::x86_avx2_psll_w; 2648 else 2649 llvm_unreachable("Unexpected size"); 2650 } else { 2651 if (Size == 'd') // psll.di.512, pslli.d, psll.d, psllv.d.512 2652 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_d_512 : 2653 IsVariable ? Intrinsic::x86_avx512_psllv_d_512 : 2654 Intrinsic::x86_avx512_psll_d_512; 2655 else if (Size == 'q') // psll.qi.512, pslli.q, psll.q, psllv.q.512 2656 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_q_512 : 2657 IsVariable ? Intrinsic::x86_avx512_psllv_q_512 : 2658 Intrinsic::x86_avx512_psll_q_512; 2659 else if (Size == 'w') // psll.wi.512, pslli.w, psll.w 2660 IID = IsImmediate ? Intrinsic::x86_avx512_pslli_w_512 2661 : Intrinsic::x86_avx512_psll_w_512; 2662 else 2663 llvm_unreachable("Unexpected size"); 2664 } 2665 2666 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2667 } else if (IsX86 && Name.startswith("avx512.mask.psrl")) { 2668 bool IsImmediate = Name[16] == 'i' || 2669 (Name.size() > 18 && Name[18] == 'i'); 2670 bool IsVariable = Name[16] == 'v'; 2671 char Size = Name[16] == '.' ? Name[17] : 2672 Name[17] == '.' ? Name[18] : 2673 Name[18] == '.' ? 
                  Name[19] :
                  Name[20];

      Intrinsic::ID IID;
      if (IsVariable && Name[17] != '.') {
        if (Size == 'd' && Name[17] == '2') // avx512.mask.psrlv2.di
          IID = Intrinsic::x86_avx2_psrlv_q;
        else if (Size == 'd' && Name[17] == '4') // avx512.mask.psrlv4.di
          IID = Intrinsic::x86_avx2_psrlv_q_256;
        else if (Size == 's' && Name[17] == '4') // avx512.mask.psrlv4.si
          IID = Intrinsic::x86_avx2_psrlv_d;
        else if (Size == 's' && Name[17] == '8') // avx512.mask.psrlv8.si
          IID = Intrinsic::x86_avx2_psrlv_d_256;
        else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrlv8.hi
          IID = Intrinsic::x86_avx512_psrlv_w_128;
        else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrlv16.hi
          IID = Intrinsic::x86_avx512_psrlv_w_256;
        else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrlv32hi
          IID = Intrinsic::x86_avx512_psrlv_w_512;
        else
          llvm_unreachable("Unexpected size");
      } else if (Name.endswith(".128")) {
        if (Size == 'd') // avx512.mask.psrl.d.128, avx512.mask.psrl.di.128
          IID = IsImmediate ? Intrinsic::x86_sse2_psrli_d
                            : Intrinsic::x86_sse2_psrl_d;
        else if (Size == 'q') // avx512.mask.psrl.q.128, avx512.mask.psrl.qi.128
          IID = IsImmediate ? Intrinsic::x86_sse2_psrli_q
                            : Intrinsic::x86_sse2_psrl_q;
        else if (Size == 'w') // avx512.mask.psrl.w.128, avx512.mask.psrl.wi.128
          IID = IsImmediate ? Intrinsic::x86_sse2_psrli_w
                            : Intrinsic::x86_sse2_psrl_w;
        else
          llvm_unreachable("Unexpected size");
      } else if (Name.endswith(".256")) {
        if (Size == 'd') // avx512.mask.psrl.d.256, avx512.mask.psrl.di.256
          IID = IsImmediate ? Intrinsic::x86_avx2_psrli_d
                            : Intrinsic::x86_avx2_psrl_d;
        else if (Size == 'q') // avx512.mask.psrl.q.256, avx512.mask.psrl.qi.256
          IID = IsImmediate ? Intrinsic::x86_avx2_psrli_q
                            : Intrinsic::x86_avx2_psrl_q;
        else if (Size == 'w') // avx512.mask.psrl.w.256, avx512.mask.psrl.wi.256
          IID = IsImmediate ? Intrinsic::x86_avx2_psrli_w
                            : Intrinsic::x86_avx2_psrl_w;
        else
          llvm_unreachable("Unexpected size");
      } else {
        if (Size == 'd') // psrl.di.512, psrli.d, psrl.d, psrl.d.512
          IID = IsImmediate ? Intrinsic::x86_avx512_psrli_d_512 :
                IsVariable  ? Intrinsic::x86_avx512_psrlv_d_512 :
                              Intrinsic::x86_avx512_psrl_d_512;
        else if (Size == 'q') // psrl.qi.512, psrli.q, psrl.q, psrl.q.512
          IID = IsImmediate ? Intrinsic::x86_avx512_psrli_q_512 :
                IsVariable  ? Intrinsic::x86_avx512_psrlv_q_512 :
                              Intrinsic::x86_avx512_psrl_q_512;
        else if (Size == 'w') // psrl.wi.512, psrli.w, psrl.w
          IID = IsImmediate ? Intrinsic::x86_avx512_psrli_w_512
                            : Intrinsic::x86_avx512_psrl_w_512;
        else
          llvm_unreachable("Unexpected size");
      }

      Rep = UpgradeX86MaskedShift(Builder, *CI, IID);
    } else if (IsX86 && Name.startswith("avx512.mask.psra")) {
      bool IsImmediate = Name[16] == 'i' ||
                         (Name.size() > 18 && Name[18] == 'i');
      bool IsVariable = Name[16] == 'v';
      char Size = Name[16] == '.' ? Name[17] :
                  Name[17] == '.' ? Name[18] :
                  Name[18] == '.' ?
Name[19] : 2742 Name[20]; 2743 2744 Intrinsic::ID IID; 2745 if (IsVariable && Name[17] != '.') { 2746 if (Size == 's' && Name[17] == '4') // avx512.mask.psrav4.si 2747 IID = Intrinsic::x86_avx2_psrav_d; 2748 else if (Size == 's' && Name[17] == '8') // avx512.mask.psrav8.si 2749 IID = Intrinsic::x86_avx2_psrav_d_256; 2750 else if (Size == 'h' && Name[17] == '8') // avx512.mask.psrav8.hi 2751 IID = Intrinsic::x86_avx512_psrav_w_128; 2752 else if (Size == 'h' && Name[17] == '1') // avx512.mask.psrav16.hi 2753 IID = Intrinsic::x86_avx512_psrav_w_256; 2754 else if (Name[17] == '3' && Name[18] == '2') // avx512.mask.psrav32hi 2755 IID = Intrinsic::x86_avx512_psrav_w_512; 2756 else 2757 llvm_unreachable("Unexpected size"); 2758 } else if (Name.endswith(".128")) { 2759 if (Size == 'd') // avx512.mask.psra.d.128, avx512.mask.psra.di.128 2760 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_d 2761 : Intrinsic::x86_sse2_psra_d; 2762 else if (Size == 'q') // avx512.mask.psra.q.128, avx512.mask.psra.qi.128 2763 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_128 : 2764 IsVariable ? Intrinsic::x86_avx512_psrav_q_128 : 2765 Intrinsic::x86_avx512_psra_q_128; 2766 else if (Size == 'w') // avx512.mask.psra.w.128, avx512.mask.psra.wi.128 2767 IID = IsImmediate ? Intrinsic::x86_sse2_psrai_w 2768 : Intrinsic::x86_sse2_psra_w; 2769 else 2770 llvm_unreachable("Unexpected size"); 2771 } else if (Name.endswith(".256")) { 2772 if (Size == 'd') // avx512.mask.psra.d.256, avx512.mask.psra.di.256 2773 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_d 2774 : Intrinsic::x86_avx2_psra_d; 2775 else if (Size == 'q') // avx512.mask.psra.q.256, avx512.mask.psra.qi.256 2776 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_256 : 2777 IsVariable ? Intrinsic::x86_avx512_psrav_q_256 : 2778 Intrinsic::x86_avx512_psra_q_256; 2779 else if (Size == 'w') // avx512.mask.psra.w.256, avx512.mask.psra.wi.256 2780 IID = IsImmediate ? Intrinsic::x86_avx2_psrai_w 2781 : Intrinsic::x86_avx2_psra_w; 2782 else 2783 llvm_unreachable("Unexpected size"); 2784 } else { 2785 if (Size == 'd') // psra.di.512, psrai.d, psra.d, psrav.d.512 2786 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_d_512 : 2787 IsVariable ? Intrinsic::x86_avx512_psrav_d_512 : 2788 Intrinsic::x86_avx512_psra_d_512; 2789 else if (Size == 'q') // psra.qi.512, psrai.q, psra.q 2790 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_q_512 : 2791 IsVariable ? Intrinsic::x86_avx512_psrav_q_512 : 2792 Intrinsic::x86_avx512_psra_q_512; 2793 else if (Size == 'w') // psra.wi.512, psrai.w, psra.w 2794 IID = IsImmediate ? Intrinsic::x86_avx512_psrai_w_512 2795 : Intrinsic::x86_avx512_psra_w_512; 2796 else 2797 llvm_unreachable("Unexpected size"); 2798 } 2799 2800 Rep = UpgradeX86MaskedShift(Builder, *CI, IID); 2801 } else if (IsX86 && Name.startswith("avx512.mask.move.s")) { 2802 Rep = upgradeMaskedMove(Builder, *CI); 2803 } else if (IsX86 && Name.startswith("avx512.cvtmask2")) { 2804 Rep = UpgradeMaskToInt(Builder, *CI); 2805 } else if (IsX86 && Name.endswith(".movntdqa")) { 2806 Module *M = F->getParent(); 2807 MDNode *Node = MDNode::get( 2808 C, ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(C), 1))); 2809 2810 Value *Ptr = CI->getArgOperand(0); 2811 VectorType *VTy = cast<VectorType>(CI->getType()); 2812 2813 // Convert the type of the pointer to a pointer to the stored type. 
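    // For illustration only (a sketch, assuming the 128-bit llvm.x86.sse41.movntdqa
    // form; the wider variants matched above are handled the same way): a call such as
    //   %v = call <2 x i64> @llvm.x86.sse41.movntdqa(i8* %p)
    // is replaced below by an ordinary vector load carrying !nontemporal metadata:
    //   %c = bitcast i8* %p to <2 x i64>*
    //   %v = load <2 x i64>, <2 x i64>* %c, align 16, !nontemporal !0
    // where !0 = !{i32 1}.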
2814 Value *BC = 2815 Builder.CreateBitCast(Ptr, PointerType::getUnqual(VTy), "cast"); 2816 LoadInst *LI = Builder.CreateAlignedLoad(BC, VTy->getBitWidth() / 8); 2817 LI->setMetadata(M->getMDKindID("nontemporal"), Node); 2818 Rep = LI; 2819 } else if (IsX86 && 2820 (Name.startswith("sse2.pavg") || Name.startswith("avx2.pavg") || 2821 Name.startswith("avx512.mask.pavg"))) { 2822 // llvm.x86.sse2.pavg.b/w, llvm.x86.avx2.pavg.b/w, 2823 // llvm.x86.avx512.mask.pavg.b/w 2824 Value *A = CI->getArgOperand(0); 2825 Value *B = CI->getArgOperand(1); 2826 VectorType *ZextType = VectorType::getExtendedElementVectorType( 2827 cast<VectorType>(A->getType())); 2828 Value *ExtendedA = Builder.CreateZExt(A, ZextType); 2829 Value *ExtendedB = Builder.CreateZExt(B, ZextType); 2830 Value *Sum = Builder.CreateAdd(ExtendedA, ExtendedB); 2831 Value *AddOne = Builder.CreateAdd(Sum, ConstantInt::get(ZextType, 1)); 2832 Value *ShiftR = Builder.CreateLShr(AddOne, ConstantInt::get(ZextType, 1)); 2833 Rep = Builder.CreateTrunc(ShiftR, A->getType()); 2834 if (CI->getNumArgOperands() > 2) { 2835 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, 2836 CI->getArgOperand(2)); 2837 } 2838 } else if (IsX86 && (Name.startswith("fma.vfmadd.") || 2839 Name.startswith("fma.vfmsub.") || 2840 Name.startswith("fma.vfnmadd.") || 2841 Name.startswith("fma.vfnmsub."))) { 2842 bool NegMul = Name[6] == 'n'; 2843 bool NegAcc = NegMul ? Name[8] == 's' : Name[7] == 's'; 2844 bool IsScalar = NegMul ? Name[12] == 's' : Name[11] == 's'; 2845 2846 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 2847 CI->getArgOperand(2) }; 2848 2849 if (IsScalar) { 2850 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 2851 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 2852 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 2853 } 2854 2855 if (NegMul && !IsScalar) 2856 Ops[0] = Builder.CreateFNeg(Ops[0]); 2857 if (NegMul && IsScalar) 2858 Ops[1] = Builder.CreateFNeg(Ops[1]); 2859 if (NegAcc) 2860 Ops[2] = Builder.CreateFNeg(Ops[2]); 2861 2862 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 2863 Intrinsic::fma, 2864 Ops[0]->getType()), 2865 Ops); 2866 2867 if (IsScalar) 2868 Rep = Builder.CreateInsertElement(CI->getArgOperand(0), Rep, 2869 (uint64_t)0); 2870 } else if (IsX86 && Name.startswith("fma4.vfmadd.s")) { 2871 Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1), 2872 CI->getArgOperand(2) }; 2873 2874 Ops[0] = Builder.CreateExtractElement(Ops[0], (uint64_t)0); 2875 Ops[1] = Builder.CreateExtractElement(Ops[1], (uint64_t)0); 2876 Ops[2] = Builder.CreateExtractElement(Ops[2], (uint64_t)0); 2877 2878 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), 2879 Intrinsic::fma, 2880 Ops[0]->getType()), 2881 Ops); 2882 2883 Rep = Builder.CreateInsertElement(Constant::getNullValue(CI->getType()), 2884 Rep, (uint64_t)0); 2885 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.s") || 2886 Name.startswith("avx512.maskz.vfmadd.s") || 2887 Name.startswith("avx512.mask3.vfmadd.s") || 2888 Name.startswith("avx512.mask3.vfmsub.s") || 2889 Name.startswith("avx512.mask3.vfnmsub.s"))) { 2890 bool IsMask3 = Name[11] == '3'; 2891 bool IsMaskZ = Name[11] == 'z'; 2892 // Drop the "avx512.mask." to make it easier. 2893 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 2894 bool NegMul = Name[2] == 'n'; 2895 bool NegAcc = NegMul ? 
Name[4] == 's' : Name[3] == 's'; 2896 2897 Value *A = CI->getArgOperand(0); 2898 Value *B = CI->getArgOperand(1); 2899 Value *C = CI->getArgOperand(2); 2900 2901 if (NegMul && (IsMask3 || IsMaskZ)) 2902 A = Builder.CreateFNeg(A); 2903 if (NegMul && !(IsMask3 || IsMaskZ)) 2904 B = Builder.CreateFNeg(B); 2905 if (NegAcc) 2906 C = Builder.CreateFNeg(C); 2907 2908 A = Builder.CreateExtractElement(A, (uint64_t)0); 2909 B = Builder.CreateExtractElement(B, (uint64_t)0); 2910 C = Builder.CreateExtractElement(C, (uint64_t)0); 2911 2912 if (!isa<ConstantInt>(CI->getArgOperand(4)) || 2913 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4) { 2914 Value *Ops[] = { A, B, C, CI->getArgOperand(4) }; 2915 2916 Intrinsic::ID IID; 2917 if (Name.back() == 'd') 2918 IID = Intrinsic::x86_avx512_vfmadd_f64; 2919 else 2920 IID = Intrinsic::x86_avx512_vfmadd_f32; 2921 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), IID); 2922 Rep = Builder.CreateCall(FMA, Ops); 2923 } else { 2924 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 2925 Intrinsic::fma, 2926 A->getType()); 2927 Rep = Builder.CreateCall(FMA, { A, B, C }); 2928 } 2929 2930 Value *PassThru = IsMaskZ ? Constant::getNullValue(Rep->getType()) : 2931 IsMask3 ? C : A; 2932 2933 // For Mask3 with NegAcc, we need to create a new extractelement that 2934 // avoids the negation above. 2935 if (NegAcc && IsMask3) 2936 PassThru = Builder.CreateExtractElement(CI->getArgOperand(2), 2937 (uint64_t)0); 2938 2939 Rep = EmitX86ScalarSelect(Builder, CI->getArgOperand(3), 2940 Rep, PassThru); 2941 Rep = Builder.CreateInsertElement(CI->getArgOperand(IsMask3 ? 2 : 0), 2942 Rep, (uint64_t)0); 2943 } else if (IsX86 && (Name.startswith("avx512.mask.vfmadd.p") || 2944 Name.startswith("avx512.mask.vfnmadd.p") || 2945 Name.startswith("avx512.mask.vfnmsub.p") || 2946 Name.startswith("avx512.mask3.vfmadd.p") || 2947 Name.startswith("avx512.mask3.vfmsub.p") || 2948 Name.startswith("avx512.mask3.vfnmsub.p") || 2949 Name.startswith("avx512.maskz.vfmadd.p"))) { 2950 bool IsMask3 = Name[11] == '3'; 2951 bool IsMaskZ = Name[11] == 'z'; 2952 // Drop the "avx512.mask." to make it easier. 2953 Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12); 2954 bool NegMul = Name[2] == 'n'; 2955 bool NegAcc = NegMul ? Name[4] == 's' : Name[3] == 's'; 2956 2957 Value *A = CI->getArgOperand(0); 2958 Value *B = CI->getArgOperand(1); 2959 Value *C = CI->getArgOperand(2); 2960 2961 if (NegMul && (IsMask3 || IsMaskZ)) 2962 A = Builder.CreateFNeg(A); 2963 if (NegMul && !(IsMask3 || IsMaskZ)) 2964 B = Builder.CreateFNeg(B); 2965 if (NegAcc) 2966 C = Builder.CreateFNeg(C); 2967 2968 if (CI->getNumArgOperands() == 5 && 2969 (!isa<ConstantInt>(CI->getArgOperand(4)) || 2970 cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) { 2971 Intrinsic::ID IID; 2972 // Check the character before ".512" in string. 2973 if (Name[Name.size()-5] == 's') 2974 IID = Intrinsic::x86_avx512_vfmadd_ps_512; 2975 else 2976 IID = Intrinsic::x86_avx512_vfmadd_pd_512; 2977 2978 Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID), 2979 { A, B, C, CI->getArgOperand(4) }); 2980 } else { 2981 Function *FMA = Intrinsic::getDeclaration(CI->getModule(), 2982 Intrinsic::fma, 2983 A->getType()); 2984 Rep = Builder.CreateCall(FMA, { A, B, C }); 2985 } 2986 2987 Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) : 2988 IsMask3 ? 
                                CI->getArgOperand(2) :
                                CI->getArgOperand(0);

    Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru);
  } else if (IsX86 && (Name.startswith("fma.vfmaddsub.p") ||
                       Name.startswith("fma.vfmsubadd.p"))) {
    bool IsSubAdd = Name[7] == 's';
    int NumElts = CI->getType()->getVectorNumElements();

    Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                     CI->getArgOperand(2) };

    Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
                                              Ops[0]->getType());
    Value *Odd = Builder.CreateCall(FMA, Ops);
    Ops[2] = Builder.CreateFNeg(Ops[2]);
    Value *Even = Builder.CreateCall(FMA, Ops);

    if (IsSubAdd)
      std::swap(Even, Odd);

    SmallVector<uint32_t, 32> Idxs(NumElts);
    for (int i = 0; i != NumElts; ++i)
      Idxs[i] = i + (i % 2) * NumElts;

    Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
  } else if (IsX86 && (Name.startswith("avx512.mask.vfmaddsub.p") ||
                       Name.startswith("avx512.mask3.vfmaddsub.p") ||
                       Name.startswith("avx512.maskz.vfmaddsub.p") ||
                       Name.startswith("avx512.mask3.vfmsubadd.p"))) {
    bool IsMask3 = Name[11] == '3';
    bool IsMaskZ = Name[11] == 'z';
    // Drop the "avx512.mask." to make it easier.
    Name = Name.drop_front(IsMask3 || IsMaskZ ? 13 : 12);
    bool IsSubAdd = Name[3] == 's';
    if (CI->getNumArgOperands() == 5 &&
        (!isa<ConstantInt>(CI->getArgOperand(4)) ||
         cast<ConstantInt>(CI->getArgOperand(4))->getZExtValue() != 4)) {
      Intrinsic::ID IID;
      // Check the character before ".512" in string.
      if (Name[Name.size()-5] == 's')
        IID = Intrinsic::x86_avx512_vfmaddsub_ps_512;
      else
        IID = Intrinsic::x86_avx512_vfmaddsub_pd_512;

      // Negate the addend first for the subadd form, then pass the (possibly
      // negated) operands on to the rounding intrinsic.
      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2), CI->getArgOperand(4) };
      if (IsSubAdd)
        Ops[2] = Builder.CreateFNeg(Ops[2]);

      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                               Ops);
    } else {
      int NumElts = CI->getType()->getVectorNumElements();

      Value *Ops[] = { CI->getArgOperand(0), CI->getArgOperand(1),
                       CI->getArgOperand(2) };

      Function *FMA = Intrinsic::getDeclaration(CI->getModule(), Intrinsic::fma,
                                                Ops[0]->getType());
      Value *Odd = Builder.CreateCall(FMA, Ops);
      Ops[2] = Builder.CreateFNeg(Ops[2]);
      Value *Even = Builder.CreateCall(FMA, Ops);

      if (IsSubAdd)
        std::swap(Even, Odd);

      SmallVector<uint32_t, 32> Idxs(NumElts);
      for (int i = 0; i != NumElts; ++i)
        Idxs[i] = i + (i % 2) * NumElts;

      Rep = Builder.CreateShuffleVector(Even, Odd, Idxs);
    }

    Value *PassThru = IsMaskZ ? llvm::Constant::getNullValue(CI->getType()) :
                      IsMask3 ?
CI->getArgOperand(2) : 3065 CI->getArgOperand(0); 3066 3067 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3068 } else if (IsX86 && (Name.startswith("avx512.mask.pternlog.") || 3069 Name.startswith("avx512.maskz.pternlog."))) { 3070 bool ZeroMask = Name[11] == 'z'; 3071 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3072 unsigned EltWidth = CI->getType()->getScalarSizeInBits(); 3073 Intrinsic::ID IID; 3074 if (VecWidth == 128 && EltWidth == 32) 3075 IID = Intrinsic::x86_avx512_pternlog_d_128; 3076 else if (VecWidth == 256 && EltWidth == 32) 3077 IID = Intrinsic::x86_avx512_pternlog_d_256; 3078 else if (VecWidth == 512 && EltWidth == 32) 3079 IID = Intrinsic::x86_avx512_pternlog_d_512; 3080 else if (VecWidth == 128 && EltWidth == 64) 3081 IID = Intrinsic::x86_avx512_pternlog_q_128; 3082 else if (VecWidth == 256 && EltWidth == 64) 3083 IID = Intrinsic::x86_avx512_pternlog_q_256; 3084 else if (VecWidth == 512 && EltWidth == 64) 3085 IID = Intrinsic::x86_avx512_pternlog_q_512; 3086 else 3087 llvm_unreachable("Unexpected intrinsic"); 3088 3089 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1), 3090 CI->getArgOperand(2), CI->getArgOperand(3) }; 3091 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3092 Args); 3093 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3094 : CI->getArgOperand(0); 3095 Rep = EmitX86Select(Builder, CI->getArgOperand(4), Rep, PassThru); 3096 } else if (IsX86 && (Name.startswith("avx512.mask.vpmadd52") || 3097 Name.startswith("avx512.maskz.vpmadd52"))) { 3098 bool ZeroMask = Name[11] == 'z'; 3099 bool High = Name[20] == 'h' || Name[21] == 'h'; 3100 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3101 Intrinsic::ID IID; 3102 if (VecWidth == 128 && !High) 3103 IID = Intrinsic::x86_avx512_vpmadd52l_uq_128; 3104 else if (VecWidth == 256 && !High) 3105 IID = Intrinsic::x86_avx512_vpmadd52l_uq_256; 3106 else if (VecWidth == 512 && !High) 3107 IID = Intrinsic::x86_avx512_vpmadd52l_uq_512; 3108 else if (VecWidth == 128 && High) 3109 IID = Intrinsic::x86_avx512_vpmadd52h_uq_128; 3110 else if (VecWidth == 256 && High) 3111 IID = Intrinsic::x86_avx512_vpmadd52h_uq_256; 3112 else if (VecWidth == 512 && High) 3113 IID = Intrinsic::x86_avx512_vpmadd52h_uq_512; 3114 else 3115 llvm_unreachable("Unexpected intrinsic"); 3116 3117 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1), 3118 CI->getArgOperand(2) }; 3119 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3120 Args); 3121 Value *PassThru = ZeroMask ? 
ConstantAggregateZero::get(CI->getType()) 3122 : CI->getArgOperand(0); 3123 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3124 } else if (IsX86 && (Name.startswith("avx512.mask.vpermi2var.") || 3125 Name.startswith("avx512.mask.vpermt2var.") || 3126 Name.startswith("avx512.maskz.vpermt2var."))) { 3127 bool ZeroMask = Name[11] == 'z'; 3128 bool IndexForm = Name[17] == 'i'; 3129 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3130 unsigned EltWidth = CI->getType()->getScalarSizeInBits(); 3131 bool IsFloat = CI->getType()->isFPOrFPVectorTy(); 3132 Intrinsic::ID IID; 3133 if (VecWidth == 128 && EltWidth == 32 && IsFloat) 3134 IID = Intrinsic::x86_avx512_vpermi2var_ps_128; 3135 else if (VecWidth == 128 && EltWidth == 32 && !IsFloat) 3136 IID = Intrinsic::x86_avx512_vpermi2var_d_128; 3137 else if (VecWidth == 128 && EltWidth == 64 && IsFloat) 3138 IID = Intrinsic::x86_avx512_vpermi2var_pd_128; 3139 else if (VecWidth == 128 && EltWidth == 64 && !IsFloat) 3140 IID = Intrinsic::x86_avx512_vpermi2var_q_128; 3141 else if (VecWidth == 256 && EltWidth == 32 && IsFloat) 3142 IID = Intrinsic::x86_avx512_vpermi2var_ps_256; 3143 else if (VecWidth == 256 && EltWidth == 32 && !IsFloat) 3144 IID = Intrinsic::x86_avx512_vpermi2var_d_256; 3145 else if (VecWidth == 256 && EltWidth == 64 && IsFloat) 3146 IID = Intrinsic::x86_avx512_vpermi2var_pd_256; 3147 else if (VecWidth == 256 && EltWidth == 64 && !IsFloat) 3148 IID = Intrinsic::x86_avx512_vpermi2var_q_256; 3149 else if (VecWidth == 512 && EltWidth == 32 && IsFloat) 3150 IID = Intrinsic::x86_avx512_vpermi2var_ps_512; 3151 else if (VecWidth == 512 && EltWidth == 32 && !IsFloat) 3152 IID = Intrinsic::x86_avx512_vpermi2var_d_512; 3153 else if (VecWidth == 512 && EltWidth == 64 && IsFloat) 3154 IID = Intrinsic::x86_avx512_vpermi2var_pd_512; 3155 else if (VecWidth == 512 && EltWidth == 64 && !IsFloat) 3156 IID = Intrinsic::x86_avx512_vpermi2var_q_512; 3157 else if (VecWidth == 128 && EltWidth == 16) 3158 IID = Intrinsic::x86_avx512_vpermi2var_hi_128; 3159 else if (VecWidth == 256 && EltWidth == 16) 3160 IID = Intrinsic::x86_avx512_vpermi2var_hi_256; 3161 else if (VecWidth == 512 && EltWidth == 16) 3162 IID = Intrinsic::x86_avx512_vpermi2var_hi_512; 3163 else if (VecWidth == 128 && EltWidth == 8) 3164 IID = Intrinsic::x86_avx512_vpermi2var_qi_128; 3165 else if (VecWidth == 256 && EltWidth == 8) 3166 IID = Intrinsic::x86_avx512_vpermi2var_qi_256; 3167 else if (VecWidth == 512 && EltWidth == 8) 3168 IID = Intrinsic::x86_avx512_vpermi2var_qi_512; 3169 else 3170 llvm_unreachable("Unexpected intrinsic"); 3171 3172 Value *Args[] = { CI->getArgOperand(0) , CI->getArgOperand(1), 3173 CI->getArgOperand(2) }; 3174 3175 // If this isn't index form we need to swap operand 0 and 1. 3176 if (!IndexForm) 3177 std::swap(Args[0], Args[1]); 3178 3179 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3180 Args); 3181 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3182 : Builder.CreateBitCast(CI->getArgOperand(1), 3183 CI->getType()); 3184 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3185 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpbusd.") || 3186 Name.startswith("avx512.maskz.vpdpbusd.") || 3187 Name.startswith("avx512.mask.vpdpbusds.") || 3188 Name.startswith("avx512.maskz.vpdpbusds."))) { 3189 bool ZeroMask = Name[11] == 'z'; 3190 bool IsSaturating = Name[ZeroMask ? 
21 : 20] == 's'; 3191 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3192 Intrinsic::ID IID; 3193 if (VecWidth == 128 && !IsSaturating) 3194 IID = Intrinsic::x86_avx512_vpdpbusd_128; 3195 else if (VecWidth == 256 && !IsSaturating) 3196 IID = Intrinsic::x86_avx512_vpdpbusd_256; 3197 else if (VecWidth == 512 && !IsSaturating) 3198 IID = Intrinsic::x86_avx512_vpdpbusd_512; 3199 else if (VecWidth == 128 && IsSaturating) 3200 IID = Intrinsic::x86_avx512_vpdpbusds_128; 3201 else if (VecWidth == 256 && IsSaturating) 3202 IID = Intrinsic::x86_avx512_vpdpbusds_256; 3203 else if (VecWidth == 512 && IsSaturating) 3204 IID = Intrinsic::x86_avx512_vpdpbusds_512; 3205 else 3206 llvm_unreachable("Unexpected intrinsic"); 3207 3208 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3209 CI->getArgOperand(2) }; 3210 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3211 Args); 3212 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3213 : CI->getArgOperand(0); 3214 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3215 } else if (IsX86 && (Name.startswith("avx512.mask.vpdpwssd.") || 3216 Name.startswith("avx512.maskz.vpdpwssd.") || 3217 Name.startswith("avx512.mask.vpdpwssds.") || 3218 Name.startswith("avx512.maskz.vpdpwssds."))) { 3219 bool ZeroMask = Name[11] == 'z'; 3220 bool IsSaturating = Name[ZeroMask ? 21 : 20] == 's'; 3221 unsigned VecWidth = CI->getType()->getPrimitiveSizeInBits(); 3222 Intrinsic::ID IID; 3223 if (VecWidth == 128 && !IsSaturating) 3224 IID = Intrinsic::x86_avx512_vpdpwssd_128; 3225 else if (VecWidth == 256 && !IsSaturating) 3226 IID = Intrinsic::x86_avx512_vpdpwssd_256; 3227 else if (VecWidth == 512 && !IsSaturating) 3228 IID = Intrinsic::x86_avx512_vpdpwssd_512; 3229 else if (VecWidth == 128 && IsSaturating) 3230 IID = Intrinsic::x86_avx512_vpdpwssds_128; 3231 else if (VecWidth == 256 && IsSaturating) 3232 IID = Intrinsic::x86_avx512_vpdpwssds_256; 3233 else if (VecWidth == 512 && IsSaturating) 3234 IID = Intrinsic::x86_avx512_vpdpwssds_512; 3235 else 3236 llvm_unreachable("Unexpected intrinsic"); 3237 3238 Value *Args[] = { CI->getArgOperand(0), CI->getArgOperand(1), 3239 CI->getArgOperand(2) }; 3240 Rep = Builder.CreateCall(Intrinsic::getDeclaration(CI->getModule(), IID), 3241 Args); 3242 Value *PassThru = ZeroMask ? ConstantAggregateZero::get(CI->getType()) 3243 : CI->getArgOperand(0); 3244 Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep, PassThru); 3245 } else if (IsX86 && Name.startswith("avx512.mask.") && 3246 upgradeAVX512MaskToSelect(Name, Builder, *CI, Rep)) { 3247 // Rep will be updated by the call in the condition. 3248 } else if (IsNVVM && (Name == "abs.i" || Name == "abs.ll")) { 3249 Value *Arg = CI->getArgOperand(0); 3250 Value *Neg = Builder.CreateNeg(Arg, "neg"); 3251 Value *Cmp = Builder.CreateICmpSGE( 3252 Arg, llvm::Constant::getNullValue(Arg->getType()), "abs.cond"); 3253 Rep = Builder.CreateSelect(Cmp, Arg, Neg, "abs"); 3254 } else if (IsNVVM && (Name == "max.i" || Name == "max.ll" || 3255 Name == "max.ui" || Name == "max.ull")) { 3256 Value *Arg0 = CI->getArgOperand(0); 3257 Value *Arg1 = CI->getArgOperand(1); 3258 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 3259 ? 
Builder.CreateICmpUGE(Arg0, Arg1, "max.cond") 3260 : Builder.CreateICmpSGE(Arg0, Arg1, "max.cond"); 3261 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "max"); 3262 } else if (IsNVVM && (Name == "min.i" || Name == "min.ll" || 3263 Name == "min.ui" || Name == "min.ull")) { 3264 Value *Arg0 = CI->getArgOperand(0); 3265 Value *Arg1 = CI->getArgOperand(1); 3266 Value *Cmp = Name.endswith(".ui") || Name.endswith(".ull") 3267 ? Builder.CreateICmpULE(Arg0, Arg1, "min.cond") 3268 : Builder.CreateICmpSLE(Arg0, Arg1, "min.cond"); 3269 Rep = Builder.CreateSelect(Cmp, Arg0, Arg1, "min"); 3270 } else if (IsNVVM && Name == "clz.ll") { 3271 // llvm.nvvm.clz.ll returns an i32, but llvm.ctlz.i64 and returns an i64. 3272 Value *Arg = CI->getArgOperand(0); 3273 Value *Ctlz = Builder.CreateCall( 3274 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctlz, 3275 {Arg->getType()}), 3276 {Arg, Builder.getFalse()}, "ctlz"); 3277 Rep = Builder.CreateTrunc(Ctlz, Builder.getInt32Ty(), "ctlz.trunc"); 3278 } else if (IsNVVM && Name == "popc.ll") { 3279 // llvm.nvvm.popc.ll returns an i32, but llvm.ctpop.i64 and returns an 3280 // i64. 3281 Value *Arg = CI->getArgOperand(0); 3282 Value *Popc = Builder.CreateCall( 3283 Intrinsic::getDeclaration(F->getParent(), Intrinsic::ctpop, 3284 {Arg->getType()}), 3285 Arg, "ctpop"); 3286 Rep = Builder.CreateTrunc(Popc, Builder.getInt32Ty(), "ctpop.trunc"); 3287 } else if (IsNVVM && Name == "h2f") { 3288 Rep = Builder.CreateCall(Intrinsic::getDeclaration( 3289 F->getParent(), Intrinsic::convert_from_fp16, 3290 {Builder.getFloatTy()}), 3291 CI->getArgOperand(0), "h2f"); 3292 } else { 3293 llvm_unreachable("Unknown function for CallInst upgrade."); 3294 } 3295 3296 if (Rep) 3297 CI->replaceAllUsesWith(Rep); 3298 CI->eraseFromParent(); 3299 return; 3300 } 3301 3302 const auto &DefaultCase = [&NewFn, &CI]() -> void { 3303 // Handle generic mangling change, but nothing else 3304 assert( 3305 (CI->getCalledFunction()->getName() != NewFn->getName()) && 3306 "Unknown function for CallInst upgrade and isn't just a name change"); 3307 CI->setCalledFunction(NewFn); 3308 }; 3309 CallInst *NewCall = nullptr; 3310 switch (NewFn->getIntrinsicID()) { 3311 default: { 3312 DefaultCase(); 3313 return; 3314 } 3315 3316 case Intrinsic::arm_neon_vld1: 3317 case Intrinsic::arm_neon_vld2: 3318 case Intrinsic::arm_neon_vld3: 3319 case Intrinsic::arm_neon_vld4: 3320 case Intrinsic::arm_neon_vld2lane: 3321 case Intrinsic::arm_neon_vld3lane: 3322 case Intrinsic::arm_neon_vld4lane: 3323 case Intrinsic::arm_neon_vst1: 3324 case Intrinsic::arm_neon_vst2: 3325 case Intrinsic::arm_neon_vst3: 3326 case Intrinsic::arm_neon_vst4: 3327 case Intrinsic::arm_neon_vst2lane: 3328 case Intrinsic::arm_neon_vst3lane: 3329 case Intrinsic::arm_neon_vst4lane: { 3330 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3331 CI->arg_operands().end()); 3332 NewCall = Builder.CreateCall(NewFn, Args); 3333 break; 3334 } 3335 3336 case Intrinsic::bitreverse: 3337 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3338 break; 3339 3340 case Intrinsic::ctlz: 3341 case Intrinsic::cttz: 3342 assert(CI->getNumArgOperands() == 1 && 3343 "Mismatch between function args and call args"); 3344 NewCall = 3345 Builder.CreateCall(NewFn, {CI->getArgOperand(0), Builder.getFalse()}); 3346 break; 3347 3348 case Intrinsic::objectsize: { 3349 Value *NullIsUnknownSize = CI->getNumArgOperands() == 2 3350 ? 
Builder.getFalse() 3351 : CI->getArgOperand(2); 3352 NewCall = Builder.CreateCall( 3353 NewFn, {CI->getArgOperand(0), CI->getArgOperand(1), NullIsUnknownSize}); 3354 break; 3355 } 3356 3357 case Intrinsic::ctpop: 3358 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3359 break; 3360 3361 case Intrinsic::convert_from_fp16: 3362 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(0)}); 3363 break; 3364 3365 case Intrinsic::dbg_value: 3366 // Upgrade from the old version that had an extra offset argument. 3367 assert(CI->getNumArgOperands() == 4); 3368 // Drop nonzero offsets instead of attempting to upgrade them. 3369 if (auto *Offset = dyn_cast_or_null<Constant>(CI->getArgOperand(1))) 3370 if (Offset->isZeroValue()) { 3371 NewCall = Builder.CreateCall( 3372 NewFn, 3373 {CI->getArgOperand(0), CI->getArgOperand(2), CI->getArgOperand(3)}); 3374 break; 3375 } 3376 CI->eraseFromParent(); 3377 return; 3378 3379 case Intrinsic::x86_xop_vfrcz_ss: 3380 case Intrinsic::x86_xop_vfrcz_sd: 3381 NewCall = Builder.CreateCall(NewFn, {CI->getArgOperand(1)}); 3382 break; 3383 3384 case Intrinsic::x86_xop_vpermil2pd: 3385 case Intrinsic::x86_xop_vpermil2ps: 3386 case Intrinsic::x86_xop_vpermil2pd_256: 3387 case Intrinsic::x86_xop_vpermil2ps_256: { 3388 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3389 CI->arg_operands().end()); 3390 VectorType *FltIdxTy = cast<VectorType>(Args[2]->getType()); 3391 VectorType *IntIdxTy = VectorType::getInteger(FltIdxTy); 3392 Args[2] = Builder.CreateBitCast(Args[2], IntIdxTy); 3393 NewCall = Builder.CreateCall(NewFn, Args); 3394 break; 3395 } 3396 3397 case Intrinsic::x86_sse41_ptestc: 3398 case Intrinsic::x86_sse41_ptestz: 3399 case Intrinsic::x86_sse41_ptestnzc: { 3400 // The arguments for these intrinsics used to be v4f32, and changed 3401 // to v2i64. This is purely a nop, since those are bitwise intrinsics. 3402 // So, the only thing required is a bitcast for both arguments. 3403 // First, check the arguments have the old type. 3404 Value *Arg0 = CI->getArgOperand(0); 3405 if (Arg0->getType() != VectorType::get(Type::getFloatTy(C), 4)) 3406 return; 3407 3408 // Old intrinsic, add bitcasts 3409 Value *Arg1 = CI->getArgOperand(1); 3410 3411 Type *NewVecTy = VectorType::get(Type::getInt64Ty(C), 2); 3412 3413 Value *BC0 = Builder.CreateBitCast(Arg0, NewVecTy, "cast"); 3414 Value *BC1 = Builder.CreateBitCast(Arg1, NewVecTy, "cast"); 3415 3416 NewCall = Builder.CreateCall(NewFn, {BC0, BC1}); 3417 break; 3418 } 3419 3420 case Intrinsic::x86_sse41_insertps: 3421 case Intrinsic::x86_sse41_dppd: 3422 case Intrinsic::x86_sse41_dpps: 3423 case Intrinsic::x86_sse41_mpsadbw: 3424 case Intrinsic::x86_avx_dp_ps_256: 3425 case Intrinsic::x86_avx2_mpsadbw: { 3426 // Need to truncate the last argument from i32 to i8 -- this argument models 3427 // an inherently 8-bit immediate operand to these x86 instructions. 3428 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3429 CI->arg_operands().end()); 3430 3431 // Replace the last argument with a trunc. 
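    // For illustration only (a sketch; the immediate value is made up): an old call
    //   call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %b, i32 17)
    // is rewritten against the new declaration as
    //   call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %b, i8 17)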
3432 Args.back() = Builder.CreateTrunc(Args.back(), Type::getInt8Ty(C), "trunc"); 3433 NewCall = Builder.CreateCall(NewFn, Args); 3434 break; 3435 } 3436 3437 case Intrinsic::thread_pointer: { 3438 NewCall = Builder.CreateCall(NewFn, {}); 3439 break; 3440 } 3441 3442 case Intrinsic::invariant_start: 3443 case Intrinsic::invariant_end: 3444 case Intrinsic::masked_load: 3445 case Intrinsic::masked_store: 3446 case Intrinsic::masked_gather: 3447 case Intrinsic::masked_scatter: { 3448 SmallVector<Value *, 4> Args(CI->arg_operands().begin(), 3449 CI->arg_operands().end()); 3450 NewCall = Builder.CreateCall(NewFn, Args); 3451 break; 3452 } 3453 3454 case Intrinsic::memcpy: 3455 case Intrinsic::memmove: 3456 case Intrinsic::memset: { 3457 // We have to make sure that the call signature is what we're expecting. 3458 // We only want to change the old signatures by removing the alignment arg: 3459 // @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i32, i1) 3460 // -> @llvm.mem[cpy|move]...(i8*, i8*, i[32|i64], i1) 3461 // @llvm.memset...(i8*, i8, i[32|64], i32, i1) 3462 // -> @llvm.memset...(i8*, i8, i[32|64], i1) 3463 // Note: i8*'s in the above can be any pointer type 3464 if (CI->getNumArgOperands() != 5) { 3465 DefaultCase(); 3466 return; 3467 } 3468 // Remove alignment argument (3), and add alignment attributes to the 3469 // dest/src pointers. 3470 Value *Args[4] = {CI->getArgOperand(0), CI->getArgOperand(1), 3471 CI->getArgOperand(2), CI->getArgOperand(4)}; 3472 NewCall = Builder.CreateCall(NewFn, Args); 3473 auto *MemCI = cast<MemIntrinsic>(NewCall); 3474 // All mem intrinsics support dest alignment. 3475 const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3)); 3476 MemCI->setDestAlignment(Align->getZExtValue()); 3477 // Memcpy/Memmove also support source alignment. 3478 if (auto *MTI = dyn_cast<MemTransferInst>(MemCI)) 3479 MTI->setSourceAlignment(Align->getZExtValue()); 3480 break; 3481 } 3482 } 3483 assert(NewCall && "Should have either set this variable or returned through " 3484 "the default case"); 3485 std::string Name = CI->getName(); 3486 if (!Name.empty()) { 3487 CI->setName(Name + ".old"); 3488 NewCall->setName(Name); 3489 } 3490 CI->replaceAllUsesWith(NewCall); 3491 CI->eraseFromParent(); 3492 } 3493 3494 void llvm::UpgradeCallsToIntrinsic(Function *F) { 3495 assert(F && "Illegal attempt to upgrade a non-existent intrinsic."); 3496 3497 // Check if this function should be upgraded and get the replacement function 3498 // if there is one. 3499 Function *NewFn; 3500 if (UpgradeIntrinsicFunction(F, NewFn)) { 3501 // Replace all users of the old function with the new function or new 3502 // instructions. This is not a range loop because the call is deleted. 3503 for (auto UI = F->user_begin(), UE = F->user_end(); UI != UE; ) 3504 if (CallInst *CI = dyn_cast<CallInst>(*UI++)) 3505 UpgradeIntrinsicCall(CI, NewFn); 3506 3507 // Remove old function, no longer used, from the module. 3508 F->eraseFromParent(); 3509 } 3510 } 3511 3512 MDNode *llvm::UpgradeTBAANode(MDNode &MD) { 3513 // Check if the tag uses struct-path aware TBAA format. 
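  // For illustration only (a sketch): an old two-operand scalar tag such as
  //   !1 = !{!"int", !0}
  // is wrapped below into the struct-path access-tag form
  //   !{!1, !1, i64 0}
  // and an old three-operand tag !{!"int", !0, i64 1} becomes
  //   !{!2, !2, i64 0, i64 1} with !2 = !{!"int", !0}.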
3514 if (isa<MDNode>(MD.getOperand(0)) && MD.getNumOperands() >= 3) 3515 return &MD; 3516 3517 auto &Context = MD.getContext(); 3518 if (MD.getNumOperands() == 3) { 3519 Metadata *Elts[] = {MD.getOperand(0), MD.getOperand(1)}; 3520 MDNode *ScalarType = MDNode::get(Context, Elts); 3521 // Create a MDNode <ScalarType, ScalarType, offset 0, const> 3522 Metadata *Elts2[] = {ScalarType, ScalarType, 3523 ConstantAsMetadata::get( 3524 Constant::getNullValue(Type::getInt64Ty(Context))), 3525 MD.getOperand(2)}; 3526 return MDNode::get(Context, Elts2); 3527 } 3528 // Create a MDNode <MD, MD, offset 0> 3529 Metadata *Elts[] = {&MD, &MD, ConstantAsMetadata::get(Constant::getNullValue( 3530 Type::getInt64Ty(Context)))}; 3531 return MDNode::get(Context, Elts); 3532 } 3533 3534 Instruction *llvm::UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy, 3535 Instruction *&Temp) { 3536 if (Opc != Instruction::BitCast) 3537 return nullptr; 3538 3539 Temp = nullptr; 3540 Type *SrcTy = V->getType(); 3541 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3542 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3543 LLVMContext &Context = V->getContext(); 3544 3545 // We have no information about target data layout, so we assume that 3546 // the maximum pointer size is 64bit. 3547 Type *MidTy = Type::getInt64Ty(Context); 3548 Temp = CastInst::Create(Instruction::PtrToInt, V, MidTy); 3549 3550 return CastInst::Create(Instruction::IntToPtr, Temp, DestTy); 3551 } 3552 3553 return nullptr; 3554 } 3555 3556 Value *llvm::UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy) { 3557 if (Opc != Instruction::BitCast) 3558 return nullptr; 3559 3560 Type *SrcTy = C->getType(); 3561 if (SrcTy->isPtrOrPtrVectorTy() && DestTy->isPtrOrPtrVectorTy() && 3562 SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace()) { 3563 LLVMContext &Context = C->getContext(); 3564 3565 // We have no information about target data layout, so we assume that 3566 // the maximum pointer size is 64bit. 3567 Type *MidTy = Type::getInt64Ty(Context); 3568 3569 return ConstantExpr::getIntToPtr(ConstantExpr::getPtrToInt(C, MidTy), 3570 DestTy); 3571 } 3572 3573 return nullptr; 3574 } 3575 3576 /// Check the debug info version number, if it is out-dated, drop the debug 3577 /// info. Return true if module is modified. 3578 bool llvm::UpgradeDebugInfo(Module &M) { 3579 unsigned Version = getDebugMetadataVersionFromModule(M); 3580 if (Version == DEBUG_METADATA_VERSION) { 3581 bool BrokenDebugInfo = false; 3582 if (verifyModule(M, &llvm::errs(), &BrokenDebugInfo)) 3583 report_fatal_error("Broken module found, compilation aborted!"); 3584 if (!BrokenDebugInfo) 3585 // Everything is ok. 3586 return false; 3587 else { 3588 // Diagnose malformed debug info. 3589 DiagnosticInfoIgnoringInvalidDebugMetadata Diag(M); 3590 M.getContext().diagnose(Diag); 3591 } 3592 } 3593 bool Modified = StripDebugInfo(M); 3594 if (Modified && Version != DEBUG_METADATA_VERSION) { 3595 // Diagnose a version mismatch. 
    DiagnosticInfoDebugMetadataVersion DiagVersion(M, Version);
    M.getContext().diagnose(DiagVersion);
  }
  return Modified;
}

bool llvm::UpgradeRetainReleaseMarker(Module &M) {
  bool Changed = false;
  NamedMDNode *ModRetainReleaseMarker =
      M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker");
  if (ModRetainReleaseMarker) {
    MDNode *Op = ModRetainReleaseMarker->getOperand(0);
    if (Op) {
      MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(0));
      if (ID) {
        SmallVector<StringRef, 4> ValueComp;
        ID->getString().split(ValueComp, "#");
        if (ValueComp.size() == 2) {
          std::string NewValue = ValueComp[0].str() + ";" + ValueComp[1].str();
          Metadata *Ops[1] = {MDString::get(M.getContext(), NewValue)};
          ModRetainReleaseMarker->setOperand(0,
                                             MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }
  return Changed;
}

bool llvm::UpgradeModuleFlags(Module &M) {
  NamedMDNode *ModFlags = M.getModuleFlagsMetadata();
  if (!ModFlags)
    return false;

  bool HasObjCFlag = false, HasClassProperties = false, Changed = false;
  for (unsigned I = 0, E = ModFlags->getNumOperands(); I != E; ++I) {
    MDNode *Op = ModFlags->getOperand(I);
    if (Op->getNumOperands() != 3)
      continue;
    MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
    if (!ID)
      continue;
    if (ID->getString() == "Objective-C Image Info Version")
      HasObjCFlag = true;
    if (ID->getString() == "Objective-C Class Properties")
      HasClassProperties = true;
    // Upgrade PIC/PIE Module Flags. The module flag behavior for these two
    // flags was Error and is now Max.
    if (ID->getString() == "PIC Level" || ID->getString() == "PIE Level") {
      if (auto *Behavior =
              mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0))) {
        if (Behavior->getLimitedValue() == Module::Error) {
          Type *Int32Ty = Type::getInt32Ty(M.getContext());
          Metadata *Ops[3] = {
              ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Module::Max)),
              MDString::get(M.getContext(), ID->getString()),
              Op->getOperand(2)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
    // Upgrade Objective-C Image Info Section. Remove the whitespace in the
    // section name so that llvm-lto will not complain about mismatching
    // module flags that are functionally the same.
    if (ID->getString() == "Objective-C Image Info Section") {
      if (auto *Value = dyn_cast_or_null<MDString>(Op->getOperand(2))) {
        SmallVector<StringRef, 4> ValueComp;
        Value->getString().split(ValueComp, " ");
        if (ValueComp.size() != 1) {
          std::string NewValue;
          for (auto &S : ValueComp)
            NewValue += S.str();
          Metadata *Ops[3] = {Op->getOperand(0), Op->getOperand(1),
                              MDString::get(M.getContext(), NewValue)};
          ModFlags->setOperand(I, MDNode::get(M.getContext(), Ops));
          Changed = true;
        }
      }
    }
  }

  // "Objective-C Class Properties" was recently added for Objective-C. We
  // upgrade ObjC bitcodes to contain an "Objective-C Class Properties" module
  // flag of value 0, so we can correctly downgrade this flag when trying to
  // link an ObjC bitcode without this module flag with an ObjC bitcode with
  // this module flag.
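  // For illustration only (a sketch): the flag added below appears in the IR as
  //   !{i32 4, !"Objective-C Class Properties", i32 0}
  // i.e. an Override-behavior flag with a value of zero.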
3684 if (HasObjCFlag && !HasClassProperties) { 3685 M.addModuleFlag(llvm::Module::Override, "Objective-C Class Properties", 3686 (uint32_t)0); 3687 Changed = true; 3688 } 3689 3690 return Changed; 3691 } 3692 3693 void llvm::UpgradeSectionAttributes(Module &M) { 3694 auto TrimSpaces = [](StringRef Section) -> std::string { 3695 SmallVector<StringRef, 5> Components; 3696 Section.split(Components, ','); 3697 3698 SmallString<32> Buffer; 3699 raw_svector_ostream OS(Buffer); 3700 3701 for (auto Component : Components) 3702 OS << ',' << Component.trim(); 3703 3704 return OS.str().substr(1); 3705 }; 3706 3707 for (auto &GV : M.globals()) { 3708 if (!GV.hasSection()) 3709 continue; 3710 3711 StringRef Section = GV.getSection(); 3712 3713 if (!Section.startswith("__DATA, __objc_catlist")) 3714 continue; 3715 3716 // __DATA, __objc_catlist, regular, no_dead_strip 3717 // __DATA,__objc_catlist,regular,no_dead_strip 3718 GV.setSection(TrimSpaces(Section)); 3719 } 3720 } 3721 3722 static bool isOldLoopArgument(Metadata *MD) { 3723 auto *T = dyn_cast_or_null<MDTuple>(MD); 3724 if (!T) 3725 return false; 3726 if (T->getNumOperands() < 1) 3727 return false; 3728 auto *S = dyn_cast_or_null<MDString>(T->getOperand(0)); 3729 if (!S) 3730 return false; 3731 return S->getString().startswith("llvm.vectorizer."); 3732 } 3733 3734 static MDString *upgradeLoopTag(LLVMContext &C, StringRef OldTag) { 3735 StringRef OldPrefix = "llvm.vectorizer."; 3736 assert(OldTag.startswith(OldPrefix) && "Expected old prefix"); 3737 3738 if (OldTag == "llvm.vectorizer.unroll") 3739 return MDString::get(C, "llvm.loop.interleave.count"); 3740 3741 return MDString::get( 3742 C, (Twine("llvm.loop.vectorize.") + OldTag.drop_front(OldPrefix.size())) 3743 .str()); 3744 } 3745 3746 static Metadata *upgradeLoopArgument(Metadata *MD) { 3747 auto *T = dyn_cast_or_null<MDTuple>(MD); 3748 if (!T) 3749 return MD; 3750 if (T->getNumOperands() < 1) 3751 return MD; 3752 auto *OldTag = dyn_cast_or_null<MDString>(T->getOperand(0)); 3753 if (!OldTag) 3754 return MD; 3755 if (!OldTag->getString().startswith("llvm.vectorizer.")) 3756 return MD; 3757 3758 // This has an old tag. Upgrade it. 3759 SmallVector<Metadata *, 8> Ops; 3760 Ops.reserve(T->getNumOperands()); 3761 Ops.push_back(upgradeLoopTag(T->getContext(), OldTag->getString())); 3762 for (unsigned I = 1, E = T->getNumOperands(); I != E; ++I) 3763 Ops.push_back(T->getOperand(I)); 3764 3765 return MDTuple::get(T->getContext(), Ops); 3766 } 3767 3768 MDNode *llvm::upgradeInstructionLoopAttachment(MDNode &N) { 3769 auto *T = dyn_cast<MDTuple>(&N); 3770 if (!T) 3771 return &N; 3772 3773 if (none_of(T->operands(), isOldLoopArgument)) 3774 return &N; 3775 3776 SmallVector<Metadata *, 8> Ops; 3777 Ops.reserve(T->getNumOperands()); 3778 for (Metadata *MD : T->operands()) 3779 Ops.push_back(upgradeLoopArgument(MD)); 3780 3781 return MDTuple::get(T->getContext(), Ops); 3782 } 3783
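// For illustration only (a sketch): a legacy loop hint operand such as
//   !{!"llvm.vectorizer.width", i32 4}
// is renamed by the helpers above to
//   !{!"llvm.loop.vectorize.width", i32 4}
// while the "llvm.vectorizer.unroll" tag maps to "llvm.loop.interleave.count".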