//===- PolynomialApproximation.cpp - Approximate math operations ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements expansion of math operations to fast approximations
// that do not rely on any of the library functions.
//
//===----------------------------------------------------------------------===//

#include <climits>
#include <cstddef>

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/Math/Transforms/Approximation.h"
#include "mlir/Dialect/Math/Transforms/Passes.h"
#include "mlir/Dialect/Utils/IndexingUtils.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/Vector/Utils/VectorUtils.h"
#include "mlir/Dialect/X86Vector/X86VectorDialect.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

using namespace mlir;
using namespace mlir::math;
using namespace mlir::vector;

// Returns vector shape if the type is a vector. Returns an empty shape if it is
// not a vector.
static ArrayRef<int64_t> vectorShape(Type type) {
  auto vectorType = type.dyn_cast<VectorType>();
  return vectorType ? vectorType.getShape() : ArrayRef<int64_t>();
}

static ArrayRef<int64_t> vectorShape(Value value) {
  return vectorShape(value.getType());
}

//----------------------------------------------------------------------------//
// Broadcast scalar types and values into vector types and values.
//----------------------------------------------------------------------------//

// Broadcasts scalar type into vector type (iff shape is non-scalar).
static Type broadcast(Type type, ArrayRef<int64_t> shape) {
  assert(!type.isa<VectorType>() && "must be scalar type");
  return !shape.empty() ? VectorType::get(shape, type) : type;
}

// Broadcasts scalar value into vector (iff shape is non-scalar).
static Value broadcast(ImplicitLocOpBuilder &builder, Value value,
                       ArrayRef<int64_t> shape) {
  assert(!value.getType().isa<VectorType>() && "must be scalar value");
  auto type = broadcast(value.getType(), shape);
  return !shape.empty() ? builder.create<BroadcastOp>(type, value) : value;
}

//----------------------------------------------------------------------------//
// Helper function to handle n-D vectors with 1-D operations.
//----------------------------------------------------------------------------//

// Expands and unrolls n-D vector operands into multiple fixed size 1-D vectors
// and calls the compute function with 1-D vector operands. Stitches back all
// results into the original n-D vector result.
//
// Examples: vectorWidth = 8
//   - vector<4x8xf32> unrolled 4 times
//   - vector<16xf32> expanded to vector<2x8xf32> and unrolled 2 times
//   - vector<4x16xf32> expanded to vector<4x2x8xf32> and unrolled 4*2 times
//
// Some math approximations rely on ISA-specific operations that only accept
// fixed size 1-D vectors (e.g. AVX expects vectors of width 8).
//
// It is the caller's responsibility to verify that the inner dimension is
// divisible by the vectorWidth, and that all operands have the same vector
// shape.
static Value
handleMultidimensionalVectors(ImplicitLocOpBuilder &builder,
                              ValueRange operands, int64_t vectorWidth,
                              llvm::function_ref<Value(ValueRange)> compute) {
  assert(!operands.empty() && "operands must not be empty");
  assert(vectorWidth > 0 && "vector width must be larger than 0");

  VectorType inputType = operands[0].getType().cast<VectorType>();
  ArrayRef<int64_t> inputShape = inputType.getShape();

  // If the input shape matches the target vector width, we can just call the
  // user-provided compute function with the operands.
  if (inputShape == llvm::makeArrayRef(vectorWidth))
    return compute(operands);

  // Check if the inner dimension has to be expanded, or if we can directly
  // iterate over the outer dimensions of the vector.
  int64_t innerDim = inputShape.back();
  int64_t expansionDim = innerDim / vectorWidth;
  assert((innerDim % vectorWidth == 0) && "invalid inner dimension size");

  // Maybe expand operands to the higher rank vector shape that we'll use to
  // iterate over and extract one dimensional vectors.
  SmallVector<int64_t> expandedShape(inputShape.begin(), inputShape.end());
  SmallVector<Value> expandedOperands(operands);

  if (expansionDim > 1) {
    // Expand shape from [..., innerDim] to [..., expansionDim, vectorWidth].
    expandedShape.insert(expandedShape.end() - 1, expansionDim);
    expandedShape.back() = vectorWidth;

    for (unsigned i = 0; i < operands.size(); ++i) {
      auto operand = operands[i];
      auto eltType = operand.getType().cast<VectorType>().getElementType();
      auto expandedType = VectorType::get(expandedShape, eltType);
      expandedOperands[i] =
          builder.create<vector::ShapeCastOp>(expandedType, operand);
    }
  }

  // Iterate over all outer dimensions of the compute shape vector type.
  auto iterationDims = ArrayRef<int64_t>(expandedShape).drop_back();
  int64_t maxLinearIndex = computeMaxLinearIndex(iterationDims);

  SmallVector<int64_t> ones(iterationDims.size(), 1);
  auto strides = computeStrides(iterationDims, ones);

  // Compute results for each one dimensional vector.
  SmallVector<Value> results(maxLinearIndex);

  for (int64_t i = 0; i < maxLinearIndex; ++i) {
    auto offsets = delinearize(strides, i);

    SmallVector<Value> extracted(expandedOperands.size());
    for (const auto &tuple : llvm::enumerate(expandedOperands))
      extracted[tuple.index()] =
          builder.create<vector::ExtractOp>(tuple.value(), offsets);

    results[i] = compute(extracted);
  }

  // Stitch results together into one large vector.
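  // Note: the zero constant below is only the seed value for the chain of
  // vector.insert ops; every lane is overwritten before the final shape_cast.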
  Type resultEltType = results[0].getType().cast<VectorType>().getElementType();
  Type resultExpandedType = VectorType::get(expandedShape, resultEltType);
  Value result = builder.create<arith::ConstantOp>(
      resultExpandedType, builder.getZeroAttr(resultExpandedType));

  for (int64_t i = 0; i < maxLinearIndex; ++i)
    result = builder.create<vector::InsertOp>(results[i], result,
                                              delinearize(strides, i));

  // Reshape back to the original vector shape.
  return builder.create<vector::ShapeCastOp>(
      VectorType::get(inputShape, resultEltType), result);
}

//----------------------------------------------------------------------------//
// Helper functions to create constants.
//----------------------------------------------------------------------------//

static Value f32Cst(ImplicitLocOpBuilder &builder, float value) {
  return builder.create<arith::ConstantOp>(builder.getF32FloatAttr(value));
}

static Value i32Cst(ImplicitLocOpBuilder &builder, int32_t value) {
  return builder.create<arith::ConstantOp>(builder.getI32IntegerAttr(value));
}

static Value f32FromBits(ImplicitLocOpBuilder &builder, uint32_t bits) {
  Value i32Value = i32Cst(builder, static_cast<int32_t>(bits));
  return builder.create<arith::BitcastOp>(builder.getF32Type(), i32Value);
}

//----------------------------------------------------------------------------//
// Helper functions to build math functions approximations.
//----------------------------------------------------------------------------//

static Value min(ImplicitLocOpBuilder &builder, Value a, Value b) {
  return builder.create<arith::SelectOp>(
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, a, b), a, b);
}

static Value max(ImplicitLocOpBuilder &builder, Value a, Value b) {
  return builder.create<arith::SelectOp>(
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, a, b), a, b);
}

static Value clamp(ImplicitLocOpBuilder &builder, Value value, Value lowerBound,
                   Value upperBound) {
  return max(builder, min(builder, value, upperBound), lowerBound);
}

// Decomposes given floating point value `arg` into a normalized fraction and
// an integral power of two (see std::frexp). Returned values have float type.
static std::pair<Value, Value> frexp(ImplicitLocOpBuilder &builder, Value arg,
                                     bool isPositive = false) {
  assert(getElementTypeOrSelf(arg).isF32() && "arg must be f32 type");
  ArrayRef<int64_t> shape = vectorShape(arg);

  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };

  auto i32 = builder.getIntegerType(32);
  auto i32Vec = broadcast(i32, shape);
  auto f32Vec = broadcast(builder.getF32Type(), shape);

  Value cst126f = f32Cst(builder, 126.0f);
  Value cstHalf = f32Cst(builder, 0.5f);
  Value cstInvMantMask = f32FromBits(builder, ~0x7f800000u);

  // Bitcast to i32 for bitwise operations.
  Value i32Half = builder.create<arith::BitcastOp>(i32, cstHalf);
  Value i32InvMantMask = builder.create<arith::BitcastOp>(i32, cstInvMantMask);
  Value i32Arg = builder.create<arith::BitcastOp>(i32Vec, arg);

  // Compute normalized fraction.
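  // For normal inputs, clearing the exponent bits of `arg` and OR-ing in the
  // exponent bits of 0.5 forces the magnitude into [0.5, 1.0) while keeping
  // the sign.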
  Value tmp0 = builder.create<arith::AndIOp>(i32Arg, bcast(i32InvMantMask));
  Value tmp1 = builder.create<arith::OrIOp>(tmp0, bcast(i32Half));
  Value normalizedFraction = builder.create<arith::BitcastOp>(f32Vec, tmp1);

  // Compute exponent.
  Value arg0 = isPositive ? arg : builder.create<math::AbsOp>(arg);
  Value biasedExponentBits = builder.create<arith::ShRUIOp>(
      builder.create<arith::BitcastOp>(i32Vec, arg0),
      bcast(i32Cst(builder, 23)));
  Value biasedExponent =
      builder.create<arith::SIToFPOp>(f32Vec, biasedExponentBits);
  Value exponent =
      builder.create<arith::SubFOp>(biasedExponent, bcast(cst126f));

  return {normalizedFraction, exponent};
}

// Computes exp2 for an i32 argument.
static Value exp2I32(ImplicitLocOpBuilder &builder, Value arg) {
  assert(getElementTypeOrSelf(arg).isInteger(32) && "arg must be i32 type");
  ArrayRef<int64_t> shape = vectorShape(arg);

  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };

  auto f32Vec = broadcast(builder.getF32Type(), shape);
  // The exponent of f32 is located at bit 23.
  auto exponentBitLocation = bcast(i32Cst(builder, 23));
  // The f32 exponent bias.
  auto bias = bcast(i32Cst(builder, 127));

  Value biasedArg = builder.create<arith::AddIOp>(arg, bias);
  Value exp2ValueInt =
      builder.create<arith::ShLIOp>(biasedArg, exponentBitLocation);
  Value exp2ValueF32 = builder.create<arith::BitcastOp>(f32Vec, exp2ValueInt);

  return exp2ValueF32;
}

namespace {
Value makePolynomialCalculation(ImplicitLocOpBuilder &builder,
                                llvm::ArrayRef<Value> coeffs, Value x) {
  assert(getElementTypeOrSelf(x).isF32() && "x must be f32 type");
  ArrayRef<int64_t> shape = vectorShape(x);

  if (coeffs.empty())
    return broadcast(builder, f32Cst(builder, 0.0f), shape);

  if (coeffs.size() == 1)
    return coeffs[0];

  Value res = builder.create<math::FmaOp>(x, coeffs[coeffs.size() - 1],
                                          coeffs[coeffs.size() - 2]);
  for (auto i = ptrdiff_t(coeffs.size()) - 3; i >= 0; --i) {
    res = builder.create<math::FmaOp>(x, res, coeffs[i]);
  }
  return res;
}
} // namespace

//----------------------------------------------------------------------------//
// Helper function/pattern to insert casts for reusing F32 expansion.
//----------------------------------------------------------------------------//

template <typename T>
LogicalResult insertCasts(Operation *op, PatternRewriter &rewriter) {
  // Conservatively only allow ops where all operand and result types match.
  Type origType = op->getResultTypes().front();
  for (Type t : llvm::drop_begin(op->getResultTypes()))
    if (origType != t)
      return rewriter.notifyMatchFailure(op, "required all types to match");
  for (Type t : op->getOperandTypes())
    if (origType != t)
      return rewriter.notifyMatchFailure(op, "required all types to match");

  // Skip if already F32 or larger than 32 bits.
  if (getElementTypeOrSelf(origType).isF32() ||
      getElementTypeOrSelf(origType).getIntOrFloatBitWidth() > 32)
    return failure();

  // Create F32 equivalent type.
  Type newType;
  if (auto shaped = origType.dyn_cast<ShapedType>()) {
    newType = shaped.clone(rewriter.getF32Type());
  } else if (origType.isa<FloatType>()) {
    newType = rewriter.getF32Type();
  } else {
    return rewriter.notifyMatchFailure(op,
                                       "unable to find F32 equivalent type");
  }

  Location loc = op->getLoc();
  SmallVector<Value> operands;
  for (auto operand : op->getOperands())
    operands.push_back(rewriter.create<arith::ExtFOp>(loc, newType, operand));
  auto result = rewriter.create<T>(loc, newType, operands);
  rewriter.replaceOpWithNewOp<arith::TruncFOp>(op, origType, result);
  return success();
}

namespace {
// Pattern to cast to F32 to reuse F32 expansion as fallback for single-result
// op.
// TODO: Consider revising to avoid adding multiple casts for a subgraph that is
// all in lower precision. Currently this is only fallback support and performs
// simplistic casting.
template <typename T>
struct ReuseF32Expansion : public OpRewritePattern<T> {
public:
  using OpRewritePattern<T>::OpRewritePattern;
  LogicalResult matchAndRewrite(T op, PatternRewriter &rewriter) const final {
    static_assert(
        T::template hasTrait<mlir::OpTrait::SameOperandsAndResultType>(),
        "requires same operands and result types");
    return insertCasts<T>(op, rewriter);
  }
};
} // namespace

//----------------------------------------------------------------------------//
// AtanOp approximation.
//----------------------------------------------------------------------------//

namespace {
struct AtanApproximation : public OpRewritePattern<math::AtanOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(math::AtanOp op,
                                PatternRewriter &rewriter) const final;
};
} // namespace

LogicalResult
AtanApproximation::matchAndRewrite(math::AtanOp op,
                                   PatternRewriter &rewriter) const {
  auto operand = op.getOperand();
  if (!getElementTypeOrSelf(operand).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
  auto one = broadcast(builder, f32Cst(builder, 1.0f), shape);

  // Remap the problem onto [0.0, 1.0] by looking at the absolute value and
  // handling the symmetry.
  Value abs = builder.create<math::AbsOp>(operand);
  Value reciprocal = builder.create<arith::DivFOp>(one, abs);
  Value compare =
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, abs, reciprocal);
  Value x = builder.create<arith::SelectOp>(compare, abs, reciprocal);

  // Perform the Taylor series approximation for atan over the range
  // [-1.0, 1.0].
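  // The FMA chain below evaluates the cubic n1*x^3 + n2*x^2 + n3*x + n4 via
  // Horner's rule; atan(x) on [0, 1] is then approximated as x * p(x).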
  auto n1 = broadcast(builder, f32Cst(builder, 0.14418283f), shape);
  auto n2 = broadcast(builder, f32Cst(builder, -0.34999234f), shape);
  auto n3 = broadcast(builder, f32Cst(builder, -0.01067831f), shape);
  auto n4 = broadcast(builder, f32Cst(builder, 1.00209986f), shape);

  Value p = builder.create<math::FmaOp>(x, n1, n2);
  p = builder.create<math::FmaOp>(x, p, n3);
  p = builder.create<math::FmaOp>(x, p, n4);
  p = builder.create<arith::MulFOp>(x, p);

  // Remap the solution from [0.0, 1.0] back to [0.0, inf].
  auto halfPi = broadcast(builder, f32Cst(builder, 1.57079632679f), shape);
  Value sub = builder.create<arith::SubFOp>(halfPi, p);
  Value select = builder.create<arith::SelectOp>(compare, p, sub);

  // Correct for the sign of the input.
  rewriter.replaceOpWithNewOp<math::CopySignOp>(op, select, operand);
  return success();
}

//----------------------------------------------------------------------------//
// Atan2Op approximation.
//----------------------------------------------------------------------------//

namespace {
struct Atan2Approximation : public OpRewritePattern<math::Atan2Op> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(math::Atan2Op op,
                                PatternRewriter &rewriter) const final;
};
} // namespace

LogicalResult
Atan2Approximation::matchAndRewrite(math::Atan2Op op,
                                    PatternRewriter &rewriter) const {
  auto y = op.getOperand(0);
  auto x = op.getOperand(1);
  if (!getElementTypeOrSelf(x).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
  ArrayRef<int64_t> shape = vectorShape(op.getResult());

  // Compute atan in the valid range.
  auto div = builder.create<arith::DivFOp>(y, x);
  auto atan = builder.create<math::AtanOp>(div);

  // Determine what the atan would be for a 180 degree rotation.
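  // atan(y/x) alone only covers quadrants I and IV; when x < 0 the result has
  // to be shifted by +/-pi so that it lands in quadrant II or III.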
  auto zero = broadcast(builder, f32Cst(builder, 0.0f), shape);
  auto pi = broadcast(builder, f32Cst(builder, 3.14159265359f), shape);
  auto addPi = builder.create<arith::AddFOp>(atan, pi);
  auto subPi = builder.create<arith::SubFOp>(atan, pi);
  auto atanGt =
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, atan, zero);
  auto flippedAtan = builder.create<arith::SelectOp>(atanGt, subPi, addPi);

  // Determine whether to directly use atan or use the 180 degree flip.
  auto xGt = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, x, zero);
  Value result = builder.create<arith::SelectOp>(xGt, atan, flippedAtan);

  // Handle x = 0, y > 0.
  Value xZero =
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ, x, zero);
  Value yGt = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, y, zero);
  Value isHalfPi = builder.create<arith::AndIOp>(xZero, yGt);
  auto halfPi = broadcast(builder, f32Cst(builder, 1.57079632679f), shape);
  result = builder.create<arith::SelectOp>(isHalfPi, halfPi, result);

  // Handle x = 0, y < 0.
  Value yLt = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, y, zero);
  Value isNegativeHalfPiPi = builder.create<arith::AndIOp>(xZero, yLt);
  auto negativeHalfPiPi =
      broadcast(builder, f32Cst(builder, -1.57079632679f), shape);
  result = builder.create<arith::SelectOp>(isNegativeHalfPiPi, negativeHalfPiPi,
                                           result);

  // Handle x = 0, y = 0.
  Value yZero =
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ, y, zero);
  Value isNan = builder.create<arith::AndIOp>(xZero, yZero);
  Value cstNan = broadcast(builder, f32FromBits(builder, 0x7fc00000), shape);
  result = builder.create<arith::SelectOp>(isNan, cstNan, result);

  rewriter.replaceOp(op, result);
  return success();
}

//----------------------------------------------------------------------------//
// TanhOp approximation.
//----------------------------------------------------------------------------//

namespace {
struct TanhApproximation : public OpRewritePattern<math::TanhOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(math::TanhOp op,
                                PatternRewriter &rewriter) const final;
};
} // namespace

LogicalResult
TanhApproximation::matchAndRewrite(math::TanhOp op,
                                   PatternRewriter &rewriter) const {
  if (!getElementTypeOrSelf(op.getOperand()).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };

  // Clamp operand into the [minusClamp, plusClamp] range.
  Value minusClamp = bcast(f32Cst(builder, -7.99881172180175781f));
  Value plusClamp = bcast(f32Cst(builder, 7.99881172180175781f));
  Value x = clamp(builder, op.getOperand(), minusClamp, plusClamp);

  // Mask for tiny values that are approximated with `operand`.
  Value tiny = bcast(f32Cst(builder, 0.0004f));
  Value tinyMask = builder.create<arith::CmpFOp>(
      arith::CmpFPredicate::OLT, builder.create<math::AbsOp>(op.getOperand()),
      tiny);

  // The monomial coefficients of the numerator polynomial (odd).
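  // (Overall this computes the rational approximation x * P(x^2) / Q(x^2),
  // with P built from the odd coefficients below and Q from the even ones.)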
  Value alpha1 = bcast(f32Cst(builder, 4.89352455891786e-03f));
  Value alpha3 = bcast(f32Cst(builder, 6.37261928875436e-04f));
  Value alpha5 = bcast(f32Cst(builder, 1.48572235717979e-05f));
  Value alpha7 = bcast(f32Cst(builder, 5.12229709037114e-08f));
  Value alpha9 = bcast(f32Cst(builder, -8.60467152213735e-11f));
  Value alpha11 = bcast(f32Cst(builder, 2.00018790482477e-13f));
  Value alpha13 = bcast(f32Cst(builder, -2.76076847742355e-16f));

  // The monomial coefficients of the denominator polynomial (even).
  Value beta0 = bcast(f32Cst(builder, 4.89352518554385e-03f));
  Value beta2 = bcast(f32Cst(builder, 2.26843463243900e-03f));
  Value beta4 = bcast(f32Cst(builder, 1.18534705686654e-04f));
  Value beta6 = bcast(f32Cst(builder, 1.19825839466702e-06f));

  // Since the polynomials are odd/even, we need x^2.
  Value x2 = builder.create<arith::MulFOp>(x, x);

  // Evaluate the numerator polynomial p.
  Value p = builder.create<math::FmaOp>(x2, alpha13, alpha11);
  p = builder.create<math::FmaOp>(x2, p, alpha9);
  p = builder.create<math::FmaOp>(x2, p, alpha7);
  p = builder.create<math::FmaOp>(x2, p, alpha5);
  p = builder.create<math::FmaOp>(x2, p, alpha3);
  p = builder.create<math::FmaOp>(x2, p, alpha1);
  p = builder.create<arith::MulFOp>(x, p);

  // Evaluate the denominator polynomial q.
  Value q = builder.create<math::FmaOp>(x2, beta6, beta4);
  q = builder.create<math::FmaOp>(x2, q, beta2);
  q = builder.create<math::FmaOp>(x2, q, beta0);

  // Divide the numerator by the denominator.
  Value res = builder.create<arith::SelectOp>(
      tinyMask, x, builder.create<arith::DivFOp>(p, q));

  rewriter.replaceOp(op, res);

  return success();
}

#define LN2_VALUE                                                              \
  0.693147180559945309417232121458176568075500134360255254120680009493393621L
#define LOG2E_VALUE                                                            \
  1.442695040888963407359924681001892137426645954152985934135449406931109219L

//----------------------------------------------------------------------------//
// LogOp and Log2Op approximation.
//----------------------------------------------------------------------------//

namespace {
template <typename Op>
struct LogApproximationBase : public OpRewritePattern<Op> {
  using OpRewritePattern<Op>::OpRewritePattern;

  /// Base 2 if 'base2' is set; natural logarithm (base e) otherwise.
  LogicalResult logMatchAndRewrite(Op op, PatternRewriter &rewriter,
                                   bool base2) const;
};
} // namespace

// This approximation comes from Julien Pommier's SSE math library.
// Link: http://gruntthepeon.free.fr/ssemath
template <typename Op>
LogicalResult
LogApproximationBase<Op>::logMatchAndRewrite(Op op, PatternRewriter &rewriter,
                                             bool base2) const {
  if (!getElementTypeOrSelf(op.getOperand()).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };

  Value cstZero = bcast(f32Cst(builder, 0.0f));
  Value cstOne = bcast(f32Cst(builder, 1.0f));
  Value cstNegHalf = bcast(f32Cst(builder, -0.5f));

  // The smallest non-denormalized float number.
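  // (Bit patterns: 0x00800000 is 2^-126, the smallest positive normal;
  // 0xff800000 and 0x7f800000 are -inf and +inf; 0x7fc00000 is a quiet NaN.)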
  Value cstMinNormPos = bcast(f32FromBits(builder, 0x00800000u));
  Value cstMinusInf = bcast(f32FromBits(builder, 0xff800000u));
  Value cstPosInf = bcast(f32FromBits(builder, 0x7f800000u));
  Value cstNan = bcast(f32FromBits(builder, 0x7fc00000));

  // Polynomial coefficients.
  Value cstCephesSQRTHF = bcast(f32Cst(builder, 0.707106781186547524f));
  Value cstCephesLogP0 = bcast(f32Cst(builder, 7.0376836292E-2f));
  Value cstCephesLogP1 = bcast(f32Cst(builder, -1.1514610310E-1f));
  Value cstCephesLogP2 = bcast(f32Cst(builder, 1.1676998740E-1f));
  Value cstCephesLogP3 = bcast(f32Cst(builder, -1.2420140846E-1f));
  Value cstCephesLogP4 = bcast(f32Cst(builder, +1.4249322787E-1f));
  Value cstCephesLogP5 = bcast(f32Cst(builder, -1.6668057665E-1f));
  Value cstCephesLogP6 = bcast(f32Cst(builder, +2.0000714765E-1f));
  Value cstCephesLogP7 = bcast(f32Cst(builder, -2.4999993993E-1f));
  Value cstCephesLogP8 = bcast(f32Cst(builder, +3.3333331174E-1f));

  Value x = op.getOperand();

  // Truncate input values to the minimum positive normal.
  x = max(builder, x, cstMinNormPos);

  // Extract the significand in the range [0.5, 1) and the exponent.
  std::pair<Value, Value> pair = frexp(builder, x, /*isPositive=*/true);
  x = pair.first;
  Value e = pair.second;

  // Shift the inputs from the range [0.5,1) to [sqrt(1/2), sqrt(2)) and shift
  // by -1.0. The values are then centered around 0, which improves the
  // stability of the polynomial evaluation:
  //
  //   if( x < SQRTHF ) {
  //     e -= 1;
  //     x = x + x - 1.0;
  //   } else { x = x - 1.0; }
  Value mask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, x,
                                             cstCephesSQRTHF);
  Value tmp = builder.create<arith::SelectOp>(mask, x, cstZero);

  x = builder.create<arith::SubFOp>(x, cstOne);
  e = builder.create<arith::SubFOp>(
      e, builder.create<arith::SelectOp>(mask, cstOne, cstZero));
  x = builder.create<arith::AddFOp>(x, tmp);

  Value x2 = builder.create<arith::MulFOp>(x, x);
  Value x3 = builder.create<arith::MulFOp>(x2, x);

  // Evaluate the polynomial approximant of degree 8 in three parts.
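  // Splitting the degree-8 polynomial into three interleaved sub-polynomials
  // keeps the FMA dependency chains short, which tends to pipeline better than
  // a single Horner chain would.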
  Value y0, y1, y2;
  y0 = builder.create<math::FmaOp>(cstCephesLogP0, x, cstCephesLogP1);
  y1 = builder.create<math::FmaOp>(cstCephesLogP3, x, cstCephesLogP4);
  y2 = builder.create<math::FmaOp>(cstCephesLogP6, x, cstCephesLogP7);
  y0 = builder.create<math::FmaOp>(y0, x, cstCephesLogP2);
  y1 = builder.create<math::FmaOp>(y1, x, cstCephesLogP5);
  y2 = builder.create<math::FmaOp>(y2, x, cstCephesLogP8);
  y0 = builder.create<math::FmaOp>(y0, x3, y1);
  y0 = builder.create<math::FmaOp>(y0, x3, y2);
  y0 = builder.create<arith::MulFOp>(y0, x3);

  y0 = builder.create<math::FmaOp>(cstNegHalf, x2, y0);
  x = builder.create<arith::AddFOp>(x, y0);

  if (base2) {
    Value cstLog2e = bcast(f32Cst(builder, static_cast<float>(LOG2E_VALUE)));
    x = builder.create<math::FmaOp>(x, cstLog2e, e);
  } else {
    Value cstLn2 = bcast(f32Cst(builder, static_cast<float>(LN2_VALUE)));
    x = builder.create<math::FmaOp>(e, cstLn2, x);
  }

  Value invalidMask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::ULT,
                                                    op.getOperand(), cstZero);
  Value zeroMask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ,
                                                 op.getOperand(), cstZero);
  Value posInfMask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ,
                                                   op.getOperand(), cstPosInf);

  // Filter out invalid values:
  //  • x == 0     -> -INF
  //  • x < 0      ->  NAN
  //  • x == +INF  -> +INF
  Value approximation = builder.create<arith::SelectOp>(
      zeroMask, cstMinusInf,
      builder.create<arith::SelectOp>(
          invalidMask, cstNan,
          builder.create<arith::SelectOp>(posInfMask, cstPosInf, x)));

  rewriter.replaceOp(op, approximation);

  return success();
}

namespace {
struct LogApproximation : public LogApproximationBase<math::LogOp> {
  using LogApproximationBase::LogApproximationBase;

  LogicalResult matchAndRewrite(math::LogOp op,
                                PatternRewriter &rewriter) const final {
    return logMatchAndRewrite(op, rewriter, /*base2=*/false);
  }
};
} // namespace

namespace {
struct Log2Approximation : public LogApproximationBase<math::Log2Op> {
  using LogApproximationBase::LogApproximationBase;

  LogicalResult matchAndRewrite(math::Log2Op op,
                                PatternRewriter &rewriter) const final {
    return logMatchAndRewrite(op, rewriter, /*base2=*/true);
  }
};
} // namespace

//----------------------------------------------------------------------------//
// Log1p approximation.
//----------------------------------------------------------------------------//

namespace {
struct Log1pApproximation : public OpRewritePattern<math::Log1pOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(math::Log1pOp op,
                                PatternRewriter &rewriter) const final;
};
} // namespace

// Approximate log(1+x).
LogicalResult
Log1pApproximation::matchAndRewrite(math::Log1pOp op,
                                    PatternRewriter &rewriter) const {
  if (!getElementTypeOrSelf(op.getOperand()).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };
  // Approximate log(1+x) using the following, due to W. Kahan:
  //   u = x + 1.0;
  //   if (u == 1.0 || u == inf) return x;
  //   return x * log(u) / (u - 1.0);
  //          ^^^^^^^^^^^^^^^^^^^^^^
  //             "logLarge" below.
  Value cstOne = bcast(f32Cst(builder, 1.0f));
  Value x = op.getOperand();
  Value u = builder.create<arith::AddFOp>(x, cstOne);
  Value uSmall =
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ, u, cstOne);
  Value logU = builder.create<math::LogOp>(u);
  Value uInf =
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ, u, logU);
  Value logLarge = builder.create<arith::MulFOp>(
      x, builder.create<arith::DivFOp>(
             logU, builder.create<arith::SubFOp>(u, cstOne)));
  Value approximation = builder.create<arith::SelectOp>(
      builder.create<arith::OrIOp>(uSmall, uInf), x, logLarge);
  rewriter.replaceOp(op, approximation);
  return success();
}

//----------------------------------------------------------------------------//
// Erf approximation.
//----------------------------------------------------------------------------//

// Approximates erf(x) with
//   a + P(x)/Q(x)
// where P and Q are polynomials of degree 4 and a is an interval-dependent
// offset.
// Different coefficients are chosen based on the value of x.
// The approximation error is ~2.5e-07.
// Boost's minimax tool that utilizes the Remez method was used to find the
// coefficients.
LogicalResult
ErfPolynomialApproximation::matchAndRewrite(math::ErfOp op,
                                            PatternRewriter &rewriter) const {
  if (!getElementTypeOrSelf(op.getOperand()).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };

  const int intervalsCount = 3;
  const int polyDegree = 4;

  Value zero = bcast(f32Cst(builder, 0));
  Value one = bcast(f32Cst(builder, 1));
  Value pp[intervalsCount][polyDegree + 1];
  pp[0][0] = bcast(f32Cst(builder, +0.00000000000000000e+00f));
  pp[0][1] = bcast(f32Cst(builder, +1.12837916222975858e+00f));
  pp[0][2] = bcast(f32Cst(builder, -5.23018562988006470e-01f));
  pp[0][3] = bcast(f32Cst(builder, +2.09741709609267072e-01f));
  pp[0][4] = bcast(f32Cst(builder, +2.58146801602987875e-02f));
  pp[1][0] = bcast(f32Cst(builder, +0.00000000000000000e+00f));
  pp[1][1] = bcast(f32Cst(builder, +1.12750687816789140e+00f));
  pp[1][2] = bcast(f32Cst(builder, -3.64721408487825775e-01f));
  pp[1][3] = bcast(f32Cst(builder, +1.18407396425136952e-01f));
  pp[1][4] = bcast(f32Cst(builder, +3.70645533056476558e-02f));
  pp[2][0] = bcast(f32Cst(builder, -3.30093071049483172e-03f));
  pp[2][1] = bcast(f32Cst(builder, +3.51961938357697011e-03f));
  pp[2][2] = bcast(f32Cst(builder, -1.41373622814988039e-03f));
  pp[2][3] = bcast(f32Cst(builder, +2.53447094961941348e-04f));
  pp[2][4] = bcast(f32Cst(builder, -1.71048029455037401e-05f));

  Value qq[intervalsCount][polyDegree + 1];
  qq[0][0] = bcast(f32Cst(builder, +1.000000000000000000e+00f));
  qq[0][1] = bcast(f32Cst(builder, -4.635138185962547255e-01f));
  qq[0][2] = bcast(f32Cst(builder, +5.192301327279782447e-01f));
  qq[0][3] = bcast(f32Cst(builder, -1.318089722204810087e-01f));
  qq[0][4] = bcast(f32Cst(builder, +7.397964654672315005e-02f));
  qq[1][0] = bcast(f32Cst(builder, +1.00000000000000000e+00f));
  qq[1][1] = bcast(f32Cst(builder, -3.27607011824493086e-01f));
  qq[1][2] = bcast(f32Cst(builder, +4.48369090658821977e-01f));
  qq[1][3] = bcast(f32Cst(builder, -8.83462621207857930e-02f));
  qq[1][4] = bcast(f32Cst(builder, +5.72442770283176093e-02f));
  qq[2][0] = bcast(f32Cst(builder, +1.00000000000000000e+00f));
  qq[2][1] = bcast(f32Cst(builder, -2.06069165953913769e+00f));
  qq[2][2] = bcast(f32Cst(builder, +1.62705939945477759e+00f));
  qq[2][3] = bcast(f32Cst(builder, -5.83389859211130017e-01f));
  qq[2][4] = bcast(f32Cst(builder, +8.21908939856640930e-02f));

  Value offsets[intervalsCount];
  offsets[0] = bcast(f32Cst(builder, 0.0f));
  offsets[1] = bcast(f32Cst(builder, 0.0f));
  offsets[2] = bcast(f32Cst(builder, 1.0f));

  Value bounds[intervalsCount];
  bounds[0] = bcast(f32Cst(builder, 0.8f));
  bounds[1] = bcast(f32Cst(builder, 2.0f));
  bounds[2] = bcast(f32Cst(builder, 3.75f));

  Value isNegativeArg = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT,
                                                      op.getOperand(), zero);
  Value negArg = builder.create<arith::NegFOp>(op.getOperand());
  Value x =
      builder.create<arith::SelectOp>(isNegativeArg, negArg, op.getOperand());

  Value offset = offsets[0];
  Value p[polyDegree + 1];
  Value q[polyDegree + 1];
  for (int i = 0; i <= polyDegree; ++i) {
    p[i] = pp[0][i];
    q[i] = qq[0][i];
  }

  // TODO: maybe use vector stacking to reduce the number of selects.
  Value isLessThanBound[intervalsCount];
  for (int j = 0; j < intervalsCount - 1; ++j) {
    isLessThanBound[j] =
        builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, x, bounds[j]);
    for (int i = 0; i <= polyDegree; ++i) {
      p[i] = builder.create<arith::SelectOp>(isLessThanBound[j], p[i],
                                             pp[j + 1][i]);
      q[i] = builder.create<arith::SelectOp>(isLessThanBound[j], q[i],
                                             qq[j + 1][i]);
    }
    offset = builder.create<arith::SelectOp>(isLessThanBound[j], offset,
                                             offsets[j + 1]);
  }
  isLessThanBound[intervalsCount - 1] = builder.create<arith::CmpFOp>(
      arith::CmpFPredicate::ULT, x, bounds[intervalsCount - 1]);

  Value pPoly = makePolynomialCalculation(builder, p, x);
  Value qPoly = makePolynomialCalculation(builder, q, x);
  Value rationalPoly = builder.create<arith::DivFOp>(pPoly, qPoly);
  Value formula = builder.create<arith::AddFOp>(offset, rationalPoly);
  formula = builder.create<arith::SelectOp>(isLessThanBound[intervalsCount - 1],
                                            formula, one);

  // erf is an odd function: erf(x) = -erf(-x).
  Value negFormula = builder.create<arith::NegFOp>(formula);
  Value res =
      builder.create<arith::SelectOp>(isNegativeArg, negFormula, formula);

  rewriter.replaceOp(op, res);

  return success();
}

//----------------------------------------------------------------------------//
// Exp approximation.
//----------------------------------------------------------------------------//

namespace {

struct ExpApproximation : public OpRewritePattern<math::ExpOp> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(math::ExpOp op,
                                PatternRewriter &rewriter) const final;
};
} // namespace

// Approximate exp(x) via its reduced-range form: with k = floor(x / ln(2)) and
// y = x - k * ln(2), y lies in [0, ln(2)) and exp(x) = exp(y) * 2^k, so only
// exp(y) has to be approximated by a polynomial.
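// For example (values rounded): for x = 10, k = floor(10 / ln(2)) = 14 and
// y = 10 - 14 * ln(2) ~= 0.2959, so exp(10) ~= exp(0.2959) * 2^14 ~= 22026.5.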
LogicalResult
ExpApproximation::matchAndRewrite(math::ExpOp op,
                                  PatternRewriter &rewriter) const {
  if (!getElementTypeOrSelf(op.getOperand()).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);

  // TODO: Consider a common pattern rewriter with all methods below to
  // write the approximations.
  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };
  auto fmla = [&](Value a, Value b, Value c) {
    return builder.create<math::FmaOp>(a, b, c);
  };
  auto mul = [&](Value a, Value b) -> Value {
    return builder.create<arith::MulFOp>(a, b);
  };
  auto sub = [&](Value a, Value b) -> Value {
    return builder.create<arith::SubFOp>(a, b);
  };
  auto floor = [&](Value a) { return builder.create<math::FloorOp>(a); };

  Value cstLn2 = bcast(f32Cst(builder, static_cast<float>(LN2_VALUE)));
  Value cstLog2E = bcast(f32Cst(builder, static_cast<float>(LOG2E_VALUE)));

  // Polynomial coefficients.
  Value cstCephesExpP0 = bcast(f32Cst(builder, 1.0));
  Value cstCephesExpP1 = bcast(f32Cst(builder, 1.0));
  Value cstCephesExpP2 = bcast(f32Cst(builder, 0.49970514590562437052f));
  Value cstCephesExpP3 = bcast(f32Cst(builder, 0.16873890085469545053f));
  Value cstCephesExpP4 = bcast(f32Cst(builder, 0.03668965196652099192f));
  Value cstCephesExpP5 = bcast(f32Cst(builder, 0.01314350012789660196f));

  Value x = op.getOperand();

  // Reduced y = x - floor(x / ln(2)) * ln(2) = x - k * ln(2)
  Value xL2Inv = mul(x, cstLog2E);
  Value kF32 = floor(xL2Inv);
  Value kLn2 = mul(kF32, cstLn2);
  Value y = sub(x, kLn2);

  // Use Estrin's evaluation scheme with 3 independent parts:
  //   P(y) = (c0 + c1 y) + (c2 + c3 y) y^2 + (c4 + c5 y) y^4
  Value y2 = mul(y, y);
  Value y4 = mul(y2, y2);

  Value q0 = fmla(cstCephesExpP1, y, cstCephesExpP0);
  Value q1 = fmla(cstCephesExpP3, y, cstCephesExpP2);
  Value q2 = fmla(cstCephesExpP5, y, cstCephesExpP4);
  Value expY = fmla(q1, y2, q0);
  expY = fmla(q2, y4, expY);

  auto i32Vec = broadcast(builder.getI32Type(), shape);

  // exp2(k)
  Value k = builder.create<arith::FPToSIOp>(i32Vec, kF32);
  Value exp2KValue = exp2I32(builder, k);

  // exp(x) = exp(y) * exp2(k)
  expY = mul(expY, exp2KValue);

  // Handle overflow, infinity and underflow of exp(x). The range of exp(x) is
  // [0, inf], partitioned as follows:
  //   exp(x) = 0                      for x <= -inf
  //   exp(x) = underflow (min_float)  for x <= -88
  //   exp(x) = +inf                   for x >= 88
  // Note: |k| = 127 is the value where the 8-bit exponent saturates.
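  // (ln(2^127) ~= 88.02, which is where the +/-88 thresholds above come from.)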
  Value zerof32Const = bcast(f32Cst(builder, 0));
  auto constPosInfinity =
      bcast(f32Cst(builder, std::numeric_limits<float>::infinity()));
  auto constNegInfinity =
      bcast(f32Cst(builder, -std::numeric_limits<float>::infinity()));
  auto underflow = bcast(f32Cst(builder, std::numeric_limits<float>::min()));

  Value kMaxConst = bcast(i32Cst(builder, 127));
  Value kMaxNegConst = bcast(i32Cst(builder, -127));
  Value rightBound =
      builder.create<arith::CmpIOp>(arith::CmpIPredicate::sle, k, kMaxConst);
  Value leftBound =
      builder.create<arith::CmpIOp>(arith::CmpIPredicate::sge, k, kMaxNegConst);

  Value isNegInfinityX = builder.create<arith::CmpFOp>(
      arith::CmpFPredicate::OEQ, x, constNegInfinity);
  Value isPosInfinityX = builder.create<arith::CmpFOp>(
      arith::CmpFPredicate::OEQ, x, constPosInfinity);
  Value isPositiveX =
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OGT, x, zerof32Const);
  Value isComputable = builder.create<arith::AndIOp>(rightBound, leftBound);

  expY = builder.create<arith::SelectOp>(
      isNegInfinityX, zerof32Const,
      builder.create<arith::SelectOp>(
          isPosInfinityX, constPosInfinity,
          builder.create<arith::SelectOp>(
              isComputable, expY,
              builder.create<arith::SelectOp>(isPositiveX, constPosInfinity,
                                              underflow))));

  rewriter.replaceOp(op, expY);

  return success();
}

//----------------------------------------------------------------------------//
// ExpM1 approximation.
//----------------------------------------------------------------------------//

namespace {

struct ExpM1Approximation : public OpRewritePattern<math::ExpM1Op> {
public:
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(math::ExpM1Op op,
                                PatternRewriter &rewriter) const final;
};
} // namespace

LogicalResult
ExpM1Approximation::matchAndRewrite(math::ExpM1Op op,
                                    PatternRewriter &rewriter) const {
  if (!getElementTypeOrSelf(op.getOperand()).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };

  // expm1(x) = exp(x) - 1 = u - 1.
  // We have to handle it carefully when x is near 0, i.e. u ~= 1,
  // and when the input is ~= -inf, i.e. u - 1 ~= -1.
  Value cstOne = bcast(f32Cst(builder, 1.0f));
  Value cstNegOne = bcast(f32Cst(builder, -1.0f));
  Value x = op.getOperand();
  Value u = builder.create<math::ExpOp>(x);
  Value uEqOne =
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ, u, cstOne);
  Value uMinusOne = builder.create<arith::SubFOp>(u, cstOne);
  Value uMinusOneEqNegOne = builder.create<arith::CmpFOp>(
      arith::CmpFPredicate::OEQ, uMinusOne, cstNegOne);
  // logU = log(u) ~= x
  Value logU = builder.create<math::LogOp>(u);

  // Detect exp(x) = +inf; written this way to avoid having to form +inf.
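  // For any finite u > 0, log(u) < u, and NaN never compares OEQ, so the
  // comparison below is true only when u (and hence log(u)) is +inf.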
  Value isInf =
      builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ, logU, u);

  // (u - 1) * (x / log(u)), where log(u) ~= x.
  Value expm1 = builder.create<arith::MulFOp>(
      uMinusOne, builder.create<arith::DivFOp>(x, logU));
  expm1 = builder.create<arith::SelectOp>(isInf, u, expm1);
  Value approximation = builder.create<arith::SelectOp>(
      uEqOne, x,
      builder.create<arith::SelectOp>(uMinusOneEqNegOne, cstNegOne, expm1));
  rewriter.replaceOp(op, approximation);
  return success();
}

//----------------------------------------------------------------------------//
// Sin and Cos approximation.
//----------------------------------------------------------------------------//

namespace {

template <bool isSine, typename OpTy>
struct SinAndCosApproximation : public OpRewritePattern<OpTy> {
public:
  using OpRewritePattern<OpTy>::OpRewritePattern;

  LogicalResult matchAndRewrite(OpTy op, PatternRewriter &rewriter) const final;
};
} // namespace

#define TWO_OVER_PI                                                            \
  0.6366197723675813430755350534900574481378385829618257949906693762L
#define PI_OVER_2                                                              \
  1.5707963267948966192313216916397514420985846996875529104874722961L

// Approximates sin(x) or cos(x) by finding the best approximation polynomial
// in the reduced range [0, pi/2] for both sin(x) and cos(x). Then given y in
// the reduced range, sin(x) is computed as sin(y), -sin(y), cos(y) or -cos(y).
template <bool isSine, typename OpTy>
LogicalResult SinAndCosApproximation<isSine, OpTy>::matchAndRewrite(
    OpTy op, PatternRewriter &rewriter) const {
  static_assert(
      llvm::is_one_of<OpTy, math::SinOp, math::CosOp>::value,
      "SinAndCosApproximation pattern expects math::SinOp or math::CosOp");

  if (!getElementTypeOrSelf(op.getOperand()).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };
  auto mul = [&](Value a, Value b) -> Value {
    return builder.create<arith::MulFOp>(a, b);
  };
  auto sub = [&](Value a, Value b) -> Value {
    return builder.create<arith::SubFOp>(a, b);
  };
  auto floor = [&](Value a) { return builder.create<math::FloorOp>(a); };

  auto i32Vec = broadcast(builder.getI32Type(), shape);
  auto fPToSignedInteger = [&](Value a) -> Value {
    return builder.create<arith::FPToSIOp>(i32Vec, a);
  };

  auto modulo4 = [&](Value a) -> Value {
    return builder.create<arith::AndIOp>(a, bcast(i32Cst(builder, 3)));
  };

  auto isEqualTo = [&](Value a, Value b) -> Value {
    return builder.create<arith::CmpIOp>(arith::CmpIPredicate::eq, a, b);
  };

  auto isGreaterThan = [&](Value a, Value b) -> Value {
    return builder.create<arith::CmpIOp>(arith::CmpIPredicate::sgt, a, b);
  };

  auto select = [&](Value cond, Value t, Value f) -> Value {
    return builder.create<arith::SelectOp>(cond, t, f);
  };

  auto fmla = [&](Value a, Value b, Value c) {
    return builder.create<math::FmaOp>(a, b, c);
  };

  auto bitwiseOr = [&](Value a, Value b) {
    return builder.create<arith::OrIOp>(a, b);
  };

  Value twoOverPi = bcast(f32Cst(builder, (float)TWO_OVER_PI));
  Value piOverTwo = bcast(f32Cst(builder, (float)PI_OVER_2));
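  // Range reduction: k = floor(x * 2/pi) selects the quadrant, and
  // y = x - k * pi/2 is the reduced argument in [0, pi/2).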
  Value x = op.getOperand();

  Value k = floor(mul(x, twoOverPi));

  Value y = sub(x, mul(k, piOverTwo));

  Value cstOne = bcast(f32Cst(builder, 1.0));
  Value cstNegativeOne = bcast(f32Cst(builder, -1.0));

  Value cstSC2 = bcast(f32Cst(builder, -0.16666667163372039794921875f));
  Value cstSC4 = bcast(f32Cst(builder, 8.333347737789154052734375e-3f));
  Value cstSC6 = bcast(f32Cst(builder, -1.9842604524455964565277099609375e-4f));
  Value cstSC8 =
      bcast(f32Cst(builder, 2.760012648650445044040679931640625e-6f));
  Value cstSC10 =
      bcast(f32Cst(builder, -2.50293279435709337121807038784027099609375e-8f));

  Value cstCC2 = bcast(f32Cst(builder, -0.5f));
  Value cstCC4 = bcast(f32Cst(builder, 4.166664183139801025390625e-2f));
  Value cstCC6 = bcast(f32Cst(builder, -1.388833043165504932403564453125e-3f));
  Value cstCC8 = bcast(f32Cst(builder, 2.47562347794882953166961669921875e-5f));
  Value cstCC10 =
      bcast(f32Cst(builder, -2.59630184018533327616751194000244140625e-7f));

  Value kMod4 = modulo4(fPToSignedInteger(k));

  Value kR0 = isEqualTo(kMod4, bcast(i32Cst(builder, 0)));
  Value kR1 = isEqualTo(kMod4, bcast(i32Cst(builder, 1)));
  Value kR2 = isEqualTo(kMod4, bcast(i32Cst(builder, 2)));
  Value kR3 = isEqualTo(kMod4, bcast(i32Cst(builder, 3)));

  Value sinUseCos = isSine ? bitwiseOr(kR1, kR3) : bitwiseOr(kR0, kR2);
  Value negativeRange = isSine ? isGreaterThan(kMod4, bcast(i32Cst(builder, 1)))
                               : bitwiseOr(kR1, kR2);

  Value y2 = mul(y, y);

  Value base = select(sinUseCos, cstOne, y);
  Value cstC2 = select(sinUseCos, cstCC2, cstSC2);
  Value cstC4 = select(sinUseCos, cstCC4, cstSC4);
  Value cstC6 = select(sinUseCos, cstCC6, cstSC6);
  Value cstC8 = select(sinUseCos, cstCC8, cstSC8);
  Value cstC10 = select(sinUseCos, cstCC10, cstSC10);

  Value v1 = fmla(y2, cstC10, cstC8);
  Value v2 = fmla(y2, v1, cstC6);
  Value v3 = fmla(y2, v2, cstC4);
  Value v4 = fmla(y2, v3, cstC2);
  Value v5 = fmla(y2, v4, cstOne);
  Value v6 = mul(base, v5);

  Value approximation = select(negativeRange, mul(cstNegativeOne, v6), v6);

  rewriter.replaceOp(op, approximation);

  return success();
}

//----------------------------------------------------------------------------//
// Rsqrt approximation.
//----------------------------------------------------------------------------//

namespace {
struct RsqrtApproximation : public OpRewritePattern<math::RsqrtOp> {
  using OpRewritePattern::OpRewritePattern;

  LogicalResult matchAndRewrite(math::RsqrtOp op,
                                PatternRewriter &rewriter) const final;
};
} // namespace

LogicalResult
RsqrtApproximation::matchAndRewrite(math::RsqrtOp op,
                                    PatternRewriter &rewriter) const {
  if (!getElementTypeOrSelf(op.getOperand()).isF32())
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ArrayRef<int64_t> shape = vectorShape(op.getOperand());

  // Only support already-vectorized rsqrt ops.
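  // (The x86vector rsqrt op used below operates on vectors of 8 x f32, hence
  // the innermost dimension must be a multiple of 8.)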
  if (shape.empty() || shape.back() % 8 != 0)
    return rewriter.notifyMatchFailure(op, "unsupported operand type");

  ImplicitLocOpBuilder builder(op->getLoc(), rewriter);
  auto bcast = [&](Value value) -> Value {
    return broadcast(builder, value, shape);
  };

  Value cstPosInf = bcast(f32FromBits(builder, 0x7f800000u));
  Value cstOnePointFive = bcast(f32Cst(builder, 1.5f));
  Value cstNegHalf = bcast(f32Cst(builder, -0.5f));
  Value cstMinNormPos = bcast(f32FromBits(builder, 0x00800000u));

  Value negHalf = builder.create<arith::MulFOp>(op.getOperand(), cstNegHalf);

  // Select only the inverse sqrt of positive normals (denormals are
  // flushed to zero).
  Value ltMinMask = builder.create<arith::CmpFOp>(
      arith::CmpFPredicate::OLT, op.getOperand(), cstMinNormPos);
  Value infMask = builder.create<arith::CmpFOp>(arith::CmpFPredicate::OEQ,
                                                op.getOperand(), cstPosInf);
  Value notNormalFiniteMask = builder.create<arith::OrIOp>(ltMinMask, infMask);

  // Compute an approximate result.
  Value yApprox = handleMultidimensionalVectors(
      builder, op->getOperands(), 8, [&builder](ValueRange operands) -> Value {
        return builder.create<x86vector::RsqrtOp>(operands);
      });

  // Do a single step of Newton-Raphson iteration to improve the approximation.
  // This uses the formula y_{n+1} = y_n * (1.5 - y_n * (0.5 * x) * y_n).
  // It is essential to evaluate the inner term like this because forming
  // y_n^2 may over- or underflow.
  Value inner = builder.create<arith::MulFOp>(negHalf, yApprox);
  Value fma = builder.create<math::FmaOp>(yApprox, inner, cstOnePointFive);
  Value yNewton = builder.create<arith::MulFOp>(yApprox, fma);

  // Select the result of the Newton-Raphson step for positive normal arguments.
  // For other arguments, choose the output of the intrinsic. This will
  // return rsqrt(+inf) = 0, rsqrt(x) = NaN if x < 0, and rsqrt(x) = +inf if
  // x is zero or a positive denormalized float (equivalent to flushing positive
  // denormalized inputs to zero).
  Value res =
      builder.create<arith::SelectOp>(notNormalFiniteMask, yApprox, yNewton);
  rewriter.replaceOp(op, res);

  return success();
}

//----------------------------------------------------------------------------//

void mlir::populateMathPolynomialApproximationPatterns(
    RewritePatternSet &patterns,
    const MathPolynomialApproximationOptions &options) {
  patterns.add<AtanApproximation, Atan2Approximation, TanhApproximation,
               LogApproximation, Log2Approximation, Log1pApproximation,
               ErfPolynomialApproximation, ExpApproximation, ExpM1Approximation,
               ReuseF32Expansion<math::Atan2Op>,
               SinAndCosApproximation<true, math::SinOp>,
               SinAndCosApproximation<false, math::CosOp>>(
      patterns.getContext());
  if (options.enableAvx2)
    patterns.add<RsqrtApproximation>(patterns.getContext());
}