/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_concurrent_lru_cache_H
#define __TBB_concurrent_lru_cache_H

#if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE
    #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h
#endif

#include "detail/_assert.h"
#include "detail/_aggregator.h"

#include <map>       // for std::map
#include <list>      // for std::list
#include <utility>   // for std::make_pair
#include <algorithm> // for std::find
#include <atomic>    // for std::atomic<bool>

namespace tbb {

namespace detail {
namespace d1 {

//-----------------------------------------------------------------------------
// Concurrent LRU cache
//-----------------------------------------------------------------------------

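// A minimal usage sketch (illustrative only, not part of this header): the cache is
// constructed from a key-to-value functor and a history capacity, and operator[]
// returns a RAII handle that pins the record while it is in use. The Widget type,
// make_widget and consume functions, and the include path are hypothetical.
//
//     #define TBB_PREVIEW_CONCURRENT_LRU_CACHE 1
//     #include "oneapi/tbb/concurrent_lru_cache.h"
//
//     Widget make_widget(int key);          // hypothetical value function
//
//     tbb::concurrent_lru_cache<int, Widget> cache(&make_widget, /*history capacity*/ 8);
//     {
//         auto h = cache[42];               // calls make_widget(42) only on a cache miss
//         consume(h.value());               // hypothetical use of the cached value
//     }                                     // handle destruction releases the record to the LRU history
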
template<typename KeyT, typename ValT, typename KeyToValFunctorT = ValT (*) (KeyT)>
class concurrent_lru_cache : no_assign {
// encapsulated helper classes
private:
    struct handle_object;
    struct storage_map_value_type;

    struct aggregator_operation;
    struct retrieve_aggregator_operation;
    struct signal_end_of_usage_aggregator_operation;

// typedefs
public:
    using key_type = KeyT;
    using value_type = ValT;
    using pointer = ValT*;
    using reference = ValT&;
    using const_pointer = const ValT*;
    using const_reference = const ValT&;

    using value_function_type = KeyToValFunctorT;
    using handle = handle_object;
private:
    using lru_cache_type = concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>;

    using storage_map_type = std::map<key_type, storage_map_value_type>;
    using storage_map_iterator_type = typename storage_map_type::iterator;
    using storage_map_pointer_type = typename storage_map_type::pointer;
    using storage_map_reference_type = typename storage_map_type::reference;

    using history_list_type = std::list<storage_map_iterator_type>;
    using history_list_iterator_type = typename history_list_type::iterator;

    using aggregator_operation_type = aggregator_operation;
    using aggregator_function_type = aggregating_functor<lru_cache_type, aggregator_operation_type>;
    using aggregator_type = aggregator<aggregator_function_type, aggregator_operation_type>;

    friend class aggregating_functor<lru_cache_type, aggregator_operation_type>;

// fields
private:
    value_function_type my_value_function;
    aggregator_type my_aggregator;

    storage_map_type my_storage_map;            // storage map for used objects
    history_list_type my_history_list;          // history list for unused objects
    const std::size_t my_history_list_capacity; // history list's allowed capacity

// interface
public:

    concurrent_lru_cache(value_function_type value_function, std::size_t cache_capacity)
        : my_value_function(value_function), my_history_list_capacity(cache_capacity) {
        my_aggregator.initialize_handler(aggregator_function_type(this));
    }

    // Looks up (or creates) the record for the given key through the aggregator and
    // returns a handle to it. Only the thread that created a new record computes the
    // value; concurrent readers of the same key wait until the record is marked ready.
    handle operator[](key_type key) {
        retrieve_aggregator_operation op(key);
        my_aggregator.execute(&op);

        if (op.is_new_value_needed()) {
            // this thread inserted the record: compute the value and publish it
            op.result().second.my_value = my_value_function(key);
            op.result().second.my_is_ready.store(true, std::memory_order_release);
        } else {
            // another thread owns the computation: wait until the value is ready
            spin_wait_while_eq(op.result().second.my_is_ready, false);
        }

        return handle(*this, op.result());
    }

private:

    // Drains a batch of queued operations on behalf of the aggregator; each operation
    // is dispatched to its serial handler and then marked as completed.
    void handle_operations(aggregator_operation* op_list) {
        while (op_list) {
            op_list->cast_and_handle(*this);
            aggregator_operation* prev_op = op_list;
            op_list = op_list->next;

            (prev_op->status).store(1, std::memory_order_release);
        }
    }

    // Called when a handle releases its record; the actual bookkeeping is done under the aggregator.
    void signal_end_of_usage(storage_map_reference_type map_record_ref) {
        signal_end_of_usage_aggregator_operation op(map_record_ref);
        my_aggregator.execute(&op);
    }

    // Serial part of releasing a record: decrements its reference counter and, when the
    // last reference is gone, moves the record into the LRU history (evicting the oldest
    // unused records if the history is over capacity).
    void signal_end_of_usage_serial(storage_map_reference_type map_record_ref) {
        storage_map_iterator_type map_it = my_storage_map.find(map_record_ref.first);

        __TBB_ASSERT(map_it != my_storage_map.end(),
            "cache should not return past-end iterators to outer world");
        __TBB_ASSERT(&(*map_it) == &map_record_ref,
            "dangling reference has been returned to outside world: data race?");
        __TBB_ASSERT(std::find(my_history_list.begin(), my_history_list.end(), map_it) == my_history_list.end(),
            "an object in use should not be in the list of unused objects");

        // if it was the last reference, put the record into the LRU history
        if (! --(map_it->second.my_ref_counter)) {
            // if the LRU history is full, evict the oldest items to make space
            if (my_history_list.size() >= my_history_list_capacity) {
                if (my_history_list_capacity == 0) {
                    // the LRU history capacity is zero, so there is no need to keep the element at all
                    my_storage_map.erase(map_it);
                    return;
                }
                std::size_t number_of_elements_to_evict = 1 + my_history_list.size() - my_history_list_capacity;

                for (std::size_t i = 0; i < number_of_elements_to_evict; ++i) {
                    storage_map_iterator_type map_it_to_evict = my_history_list.back();

                    __TBB_ASSERT(map_it_to_evict->second.my_ref_counter == 0,
                        "item to be evicted should not have live references");

                    // TODO: can we use forward_list instead of list? pop_front / insert_after last
                    my_history_list.pop_back();
                    my_storage_map.erase(map_it_to_evict);
                }
            }

            // TODO: can we use forward_list instead of list? pop_front / insert_after last
            my_history_list.push_front(map_it);
            map_it->second.my_history_list_iterator = my_history_list.begin();
        }
    }

    // Serial part of the lookup: finds or creates the record for the key, removes it
    // from the LRU history if it was there, and takes a reference on it.
    storage_map_reference_type retrieve_serial(key_type key, bool& is_new_value_needed) {
        storage_map_iterator_type map_it = my_storage_map.find(key);

        if (map_it == my_storage_map.end()) {
            map_it = my_storage_map.emplace_hint(
                map_it, std::piecewise_construct, std::make_tuple(key), std::make_tuple(value_type(), 0, my_history_list.end(), false));
            is_new_value_needed = true;
        } else {
            history_list_iterator_type list_it = map_it->second.my_history_list_iterator;
            if (list_it != my_history_list.end()) {
                __TBB_ASSERT(map_it->second.my_ref_counter == 0,
                    "an unused item in the history should not have live references");

                // The item is about to be used again, so it is no longer a candidate
                // for eviction; remove it from the LRU history.
                my_history_list.erase(list_it);
                map_it->second.my_history_list_iterator = my_history_list.end();
            }
        }

        ++(map_it->second.my_ref_counter);
        return *map_it;
    }
};

//-----------------------------------------------------------------------------
// Value type for storage map in concurrent LRU cache
//-----------------------------------------------------------------------------

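// Each record in the storage map bundles the cached value, a counter of outstanding
// handles, an iterator into the LRU history (equal to my_history_list.end() while the
// record is in use), and an atomic flag that readers spin on until the producing thread
// has stored the value.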
template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::storage_map_value_type {
// typedefs
public:
    using ref_counter_type = std::size_t;

// fields
public:
    value_type my_value;
    ref_counter_type my_ref_counter;
    history_list_iterator_type my_history_list_iterator;
    std::atomic<bool> my_is_ready;

// interface
public:
    storage_map_value_type(
        value_type const& value, ref_counter_type ref_counter,
        history_list_iterator_type history_list_iterator, bool is_ready)
        : my_value(value), my_ref_counter(ref_counter),
          my_history_list_iterator(history_list_iterator), my_is_ready(is_ready) {}
};

//-----------------------------------------------------------------------------
// Handle object for operator[] in concurrent LRU cache
//-----------------------------------------------------------------------------

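// The handle is a move-only RAII object returned by operator[]. While at least one
// handle refers to a record, the record is kept out of the LRU history and cannot be
// evicted; destroying (or move-assigning over) the last such handle signals the cache,
// which either re-inserts the record into the history or erases it.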
template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::handle_object {
// fields
private:
    lru_cache_type* my_lru_cache_ptr;
    storage_map_pointer_type my_map_record_ptr;

// interface
public:
    handle_object()
        : my_lru_cache_ptr(nullptr), my_map_record_ptr(nullptr) {}
    handle_object(lru_cache_type& lru_cache_ref, storage_map_reference_type map_record_ref)
        : my_lru_cache_ptr(&lru_cache_ref), my_map_record_ptr(&map_record_ref) {}

    handle_object(handle_object&) = delete;
    void operator=(handle_object&) = delete;

    handle_object(handle_object&& other)
        : my_lru_cache_ptr(other.my_lru_cache_ptr), my_map_record_ptr(other.my_map_record_ptr) {

        __TBB_ASSERT(
            (other.my_lru_cache_ptr != nullptr && other.my_map_record_ptr != nullptr) ||
            (other.my_lru_cache_ptr == nullptr && other.my_map_record_ptr == nullptr),
            "invalid state of moving object?");

        other.my_lru_cache_ptr = nullptr;
        other.my_map_record_ptr = nullptr;
    }

    handle_object& operator=(handle_object&& other) {
        __TBB_ASSERT(
            (other.my_lru_cache_ptr != nullptr && other.my_map_record_ptr != nullptr) ||
            (other.my_lru_cache_ptr == nullptr && other.my_map_record_ptr == nullptr),
            "invalid state of moving object?");

        if (my_lru_cache_ptr)
            my_lru_cache_ptr->signal_end_of_usage(*my_map_record_ptr);

        my_lru_cache_ptr = other.my_lru_cache_ptr;
        my_map_record_ptr = other.my_map_record_ptr;
        other.my_lru_cache_ptr = nullptr;
        other.my_map_record_ptr = nullptr;

        return *this;
    }

    ~handle_object() {
        if (my_lru_cache_ptr)
            my_lru_cache_ptr->signal_end_of_usage(*my_map_record_ptr);
    }

    operator bool() const {
        return (my_lru_cache_ptr && my_map_record_ptr);
    }

    value_type& value() {
        __TBB_ASSERT(my_lru_cache_ptr, "get value from already moved object?");
        __TBB_ASSERT(my_map_record_ptr, "get value from an invalid or already moved object?");

        return my_map_record_ptr->second.my_value;
    }
};

//-----------------------------------------------------------------------------
// Aggregator operation for aggregator type in concurrent LRU cache
//-----------------------------------------------------------------------------

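// All mutations of the shared map and history list are funneled through the aggregator:
// threads enqueue an operation, and a single thread drains the queue via
// handle_operations(), so retrieve_serial() and signal_end_of_usage_serial() never run
// concurrently with each other.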
template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::aggregator_operation
    : aggregated_operation<aggregator_operation> {
// encapsulated helper classes
public:
    enum class op_type { retrieve, signal_end_of_usage };

// fields
private:
    op_type my_op;

// interface
public:
    aggregator_operation(op_type op) : my_op(op) {}

    // TODO: aggregator_operation can be implemented
    //   - as a statically typed variant type or CRTP? (static, dependent on the use case)
    //   - or use pointer to function and apply_visitor (dynamic)
    //   - or use virtual functions (dynamic)
    void cast_and_handle(lru_cache_type& lru_cache_ref) {
        if (my_op == op_type::retrieve)
            static_cast<retrieve_aggregator_operation*>(this)->handle(lru_cache_ref);
        else
            static_cast<signal_end_of_usage_aggregator_operation*>(this)->handle(lru_cache_ref);
    }
};

template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::retrieve_aggregator_operation
    : aggregator_operation, private no_assign {
public:
    key_type my_key;
    storage_map_pointer_type my_map_record_ptr;
    bool my_is_new_value_needed;

public:
    retrieve_aggregator_operation(key_type key)
        : aggregator_operation(aggregator_operation::op_type::retrieve),
          my_key(key), my_map_record_ptr(nullptr), my_is_new_value_needed(false) {}

    void handle(lru_cache_type& lru_cache_ref) {
        my_map_record_ptr = &lru_cache_ref.retrieve_serial(my_key, my_is_new_value_needed);
    }

    storage_map_reference_type result() {
        __TBB_ASSERT(my_map_record_ptr, "Attempt to call result() before calling handle()");
        return *my_map_record_ptr;
    }

    bool is_new_value_needed() { return my_is_new_value_needed; }
};

template<typename KeyT, typename ValT, typename KeyToValFunctorT>
struct concurrent_lru_cache<KeyT, ValT, KeyToValFunctorT>::signal_end_of_usage_aggregator_operation
    : aggregator_operation, private no_assign {

private:
    storage_map_reference_type my_map_record_ref;

public:
    signal_end_of_usage_aggregator_operation(storage_map_reference_type map_record_ref)
        : aggregator_operation(aggregator_operation::op_type::signal_end_of_usage),
          my_map_record_ref(map_record_ref) {}

    void handle(lru_cache_type& lru_cache_ref) {
        lru_cache_ref.signal_end_of_usage_serial(my_map_record_ref);
    }
};

// TODO: if we have guarantees that KeyToValFunctorT always has
//       ValT as a return type and KeyT as an argument type,
//       we can deduce the template parameters of concurrent_lru_cache
//       by pattern matching on KeyToValFunctorT

} // namespace d1
} // namespace detail

inline namespace v1 {

using detail::d1::concurrent_lru_cache;

} // inline namespace v1
} // namespace tbb

#endif // __TBB_concurrent_lru_cache_H