1 //===- InstCombineAtomicRMW.cpp -------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visit functions for atomic rmw instructions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/IR/Instructions.h"
15 
16 using namespace llvm;
17 
18 namespace {
19 /// Return true if and only if the given instruction does not modify the memory
20 /// location referenced.  Note that an idemptent atomicrmw may still have
21 /// ordering effects on nearby instructions, or be volatile.
22 /// TODO: Common w/ the version in AtomicExpandPass, and change the term used.
23 /// Idemptotent is confusing in this context.
24 bool isIdempotentRMW(AtomicRMWInst& RMWI) {
25   if (auto CF = dyn_cast<ConstantFP>(RMWI.getValOperand()))
26     switch(RMWI.getOperation()) {
27     case AtomicRMWInst::FAdd: // -0.0
28       return CF->isZero() && CF->isNegative();
29     case AtomicRMWInst::FSub: // +0.0
30       return CF->isZero() && !CF->isNegative();
31     default:
32       return false;
33     };
34 
35   auto C = dyn_cast<ConstantInt>(RMWI.getValOperand());
36   if(!C)
37     return false;
38 
39   switch(RMWI.getOperation()) {
40     case AtomicRMWInst::Add:
41     case AtomicRMWInst::Sub:
42     case AtomicRMWInst::Or:
43     case AtomicRMWInst::Xor:
44       return C->isZero();
45     case AtomicRMWInst::And:
46       return C->isMinusOne();
47     case AtomicRMWInst::Min:
48       return C->isMaxValue(true);
49     case AtomicRMWInst::Max:
50       return C->isMinValue(true);
51     case AtomicRMWInst::UMin:
52       return C->isMaxValue(false);
53     case AtomicRMWInst::UMax:
54       return C->isMinValue(false);
55     default:
56       return false;
57   }
58 }
59 
60 /// Return true if the given instruction always produces a value in memory
61 /// equivalent to its value operand.
62 bool isSaturating(AtomicRMWInst& RMWI) {
63   if (auto CF = dyn_cast<ConstantFP>(RMWI.getValOperand()))
64     switch(RMWI.getOperation()) {
65     case AtomicRMWInst::FAdd:
66     case AtomicRMWInst::FSub:
67       return CF->isNaN();
68     default:
69       return false;
70     };
71 
72   auto C = dyn_cast<ConstantInt>(RMWI.getValOperand());
73   if(!C)
74     return false;
75 
76   switch(RMWI.getOperation()) {
77   default:
78     return false;
79   case AtomicRMWInst::Xchg:
80     return true;
81   case AtomicRMWInst::Or:
82     return C->isAllOnesValue();
83   case AtomicRMWInst::And:
84     return C->isZero();
85   case AtomicRMWInst::Min:
86     return C->isMinValue(true);
87   case AtomicRMWInst::Max:
88     return C->isMaxValue(true);
89   case AtomicRMWInst::UMin:
90     return C->isMinValue(false);
91   case AtomicRMWInst::UMax:
92     return C->isMaxValue(false);
93   };
94 }
95 } // namespace
96 
/// Simplify an atomicrmw: convert saturating ops to xchg, dead xchg to an
/// atomic store, and idempotent ops to a canonical form or an atomic load.
/// Returns the replacement instruction (or the mutated RMWI) when a change
/// was made, or nullptr when no transform applies.
Instruction *InstCombinerImpl::visitAtomicRMWInst(AtomicRMWInst &RMWI) {

  // Volatile RMWs perform a load and a store, we cannot replace this by just a
  // load or just a store. We chose not to canonicalize out of general paranoia
  // about user expectations around volatile.
  if (RMWI.isVolatile())
    return nullptr;

  // Any atomicrmw op which produces a known result in memory can be
  // replaced w/an atomicrmw xchg.
  if (isSaturating(RMWI) &&
      RMWI.getOperation() != AtomicRMWInst::Xchg) {
    RMWI.setOperation(AtomicRMWInst::Xchg);
    // Returning the mutated instruction signals InstCombine that it changed
    // and should be revisited (it may now hit the dead-xchg case below).
    return &RMWI;
  }

  AtomicOrdering Ordering = RMWI.getOrdering();
  assert(Ordering != AtomicOrdering::NotAtomic &&
         Ordering != AtomicOrdering::Unordered &&
         "AtomicRMWs don't make sense with Unordered or NotAtomic");

  // Any atomicrmw xchg with no uses can be converted to an atomic store if the
  // ordering is compatible.
  if (RMWI.getOperation() == AtomicRMWInst::Xchg &&
      RMWI.use_empty()) {
    // A store only provides the write half of the RMW; bail if the ordering
    // constrains the (dropped) read half as well.
    if (Ordering != AtomicOrdering::Release &&
        Ordering != AtomicOrdering::Monotonic)
      return nullptr;
    // Insert the store right before RMWI, then erase the now-dead RMW.
    auto *SI = new StoreInst(RMWI.getValOperand(),
                             RMWI.getPointerOperand(), &RMWI);
    SI->setAtomic(Ordering, RMWI.getSyncScopeID());
    SI->setAlignment(DL.getABITypeAlign(RMWI.getType()));
    return eraseInstFromFunction(RMWI);
  }

  if (!isIdempotentRMW(RMWI))
    return nullptr;

  // We chose to canonicalize all idempotent operations to a single
  // operation code and constant.  This makes it easier for the rest of the
  // optimizer to match easily.  The choices of or w/0 and fadd w/-0.0 are
  // arbitrary.
  if (RMWI.getType()->isIntegerTy() &&
      RMWI.getOperation() != AtomicRMWInst::Or) {
    RMWI.setOperation(AtomicRMWInst::Or);
    // Operand 1 is the value operand; replaceOperand updates the worklist.
    return replaceOperand(RMWI, 1, ConstantInt::get(RMWI.getType(), 0));
  } else if (RMWI.getType()->isFloatingPointTy() &&
             RMWI.getOperation() != AtomicRMWInst::FAdd) {
    RMWI.setOperation(AtomicRMWInst::FAdd);
    return replaceOperand(RMWI, 1, ConstantFP::getNegativeZero(RMWI.getType()));
  }

  // Check if the required ordering is compatible with an atomic load.
  if (Ordering != AtomicOrdering::Acquire &&
      Ordering != AtomicOrdering::Monotonic)
    return nullptr;

  // An idempotent RMW only needs the read half; returning the (uninserted)
  // load tells InstCombine to insert it and replace RMWI's uses with it.
  LoadInst *Load = new LoadInst(RMWI.getType(), RMWI.getPointerOperand(), "",
                                false, DL.getABITypeAlign(RMWI.getType()),
                                Ordering, RMWI.getSyncScopeID());
  return Load;
}
159