//===- MemoryMapper.cpp - Cross-process memory mapper ------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/Orc/MemoryMapper.h"

#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
#include "llvm/Support/WindowsError.h"

#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace llvm {
namespace orc {

~MemoryMapper()25 MemoryMapper::~MemoryMapper() {}
26
InProcessMemoryMapper(size_t PageSize)27 InProcessMemoryMapper::InProcessMemoryMapper(size_t PageSize)
28 : PageSize(PageSize) {}
29
30 Expected<std::unique_ptr<InProcessMemoryMapper>>
Create()31 InProcessMemoryMapper::Create() {
32 auto PageSize = sys::Process::getPageSize();
33 if (!PageSize)
34 return PageSize.takeError();
35 return std::make_unique<InProcessMemoryMapper>(*PageSize);
36 }
37
reserve(size_t NumBytes,OnReservedFunction OnReserved)38 void InProcessMemoryMapper::reserve(size_t NumBytes,
39 OnReservedFunction OnReserved) {
40 std::error_code EC;
41 auto MB = sys::Memory::allocateMappedMemory(
42 NumBytes, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
43
44 if (EC)
45 return OnReserved(errorCodeToError(EC));
46
47 {
48 std::lock_guard<std::mutex> Lock(Mutex);
49 Reservations[MB.base()].Size = MB.allocatedSize();
50 }
51
52 OnReserved(
53 ExecutorAddrRange(ExecutorAddr::fromPtr(MB.base()), MB.allocatedSize()));
54 }
55
prepare(ExecutorAddr Addr,size_t ContentSize)56 char *InProcessMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
57 return Addr.toPtr<char *>();
58 }
59
initialize(MemoryMapper::AllocInfo & AI,OnInitializedFunction OnInitialized)60 void InProcessMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
61 OnInitializedFunction OnInitialized) {
62 ExecutorAddr MinAddr(~0ULL);
63
64 for (auto &Segment : AI.Segments) {
65 auto Base = AI.MappingBase + Segment.Offset;
66 auto Size = Segment.ContentSize + Segment.ZeroFillSize;
67
68 if (Base < MinAddr)
69 MinAddr = Base;
70
71 std::memset((Base + Segment.ContentSize).toPtr<void *>(), 0,
72 Segment.ZeroFillSize);
73
74 if (auto EC = sys::Memory::protectMappedMemory({Base.toPtr<void *>(), Size},
75 Segment.Prot)) {
76 return OnInitialized(errorCodeToError(EC));
77 }
78 if (Segment.Prot & sys::Memory::MF_EXEC)
79 sys::Memory::InvalidateInstructionCache(Base.toPtr<void *>(), Size);
80 }
81
82 auto DeinitializeActions = shared::runFinalizeActions(AI.Actions);
83 if (!DeinitializeActions)
84 return OnInitialized(DeinitializeActions.takeError());
85
86 {
87 std::lock_guard<std::mutex> Lock(Mutex);
88 Allocations[MinAddr].DeinitializationActions =
89 std::move(*DeinitializeActions);
90 Reservations[AI.MappingBase.toPtr<void *>()].Allocations.push_back(MinAddr);
91 }
92
93 OnInitialized(MinAddr);
94 }
95
deinitialize(ArrayRef<ExecutorAddr> Bases,MemoryMapper::OnDeinitializedFunction OnDeinitialized)96 void InProcessMemoryMapper::deinitialize(
97 ArrayRef<ExecutorAddr> Bases,
98 MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
99 Error AllErr = Error::success();
100
101 {
102 std::lock_guard<std::mutex> Lock(Mutex);
103
104 for (auto Base : Bases) {
105
106 if (Error Err = shared::runDeallocActions(
107 Allocations[Base].DeinitializationActions)) {
108 AllErr = joinErrors(std::move(AllErr), std::move(Err));
109 }
110
111 Allocations.erase(Base);
112 }
113 }
114
115 OnDeinitialized(std::move(AllErr));
116 }
117
release(ArrayRef<ExecutorAddr> Bases,OnReleasedFunction OnReleased)118 void InProcessMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
119 OnReleasedFunction OnReleased) {
120 Error Err = Error::success();
121
122 for (auto Base : Bases) {
123 std::vector<ExecutorAddr> AllocAddrs;
124 size_t Size;
125 {
126 std::lock_guard<std::mutex> Lock(Mutex);
127 auto &R = Reservations[Base.toPtr<void *>()];
128 Size = R.Size;
129 AllocAddrs.swap(R.Allocations);
130 }
131
132 // deinitialize sub allocations
133 std::promise<MSVCPError> P;
134 auto F = P.get_future();
135 deinitialize(AllocAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
136 if (Error E = F.get()) {
137 Err = joinErrors(std::move(Err), std::move(E));
138 }
139
140 // free the memory
141 auto MB = sys::MemoryBlock(Base.toPtr<void *>(), Size);
142
143 auto EC = sys::Memory::releaseMappedMemory(MB);
144 if (EC) {
145 Err = joinErrors(std::move(Err), errorCodeToError(EC));
146 }
147
148 std::lock_guard<std::mutex> Lock(Mutex);
149 Reservations.erase(Base.toPtr<void *>());
150 }
151
152 OnReleased(std::move(Err));
153 }
154
~InProcessMemoryMapper()155 InProcessMemoryMapper::~InProcessMemoryMapper() {
156 std::vector<ExecutorAddr> ReservationAddrs;
157 {
158 std::lock_guard<std::mutex> Lock(Mutex);
159
160 ReservationAddrs.reserve(Reservations.size());
161 for (const auto &R : Reservations) {
162 ReservationAddrs.push_back(ExecutorAddr::fromPtr(R.getFirst()));
163 }
164 }
165
166 std::promise<MSVCPError> P;
167 auto F = P.get_future();
168 release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
169 cantFail(F.get());
170 }
171
// SharedMemoryMapper

SharedMemoryMapper(ExecutorProcessControl & EPC,SymbolAddrs SAs,size_t PageSize)174 SharedMemoryMapper::SharedMemoryMapper(ExecutorProcessControl &EPC,
175 SymbolAddrs SAs, size_t PageSize)
176 : EPC(EPC), SAs(SAs), PageSize(PageSize) {
177 #if (!defined(LLVM_ON_UNIX) || defined(__ANDROID__)) && !defined(_WIN32)
178 llvm_unreachable("SharedMemoryMapper is not supported on this platform yet");
179 #endif
180 }
181
182 Expected<std::unique_ptr<SharedMemoryMapper>>
Create(ExecutorProcessControl & EPC,SymbolAddrs SAs)183 SharedMemoryMapper::Create(ExecutorProcessControl &EPC, SymbolAddrs SAs) {
184 #if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
185 auto PageSize = sys::Process::getPageSize();
186 if (!PageSize)
187 return PageSize.takeError();
188
189 return std::make_unique<SharedMemoryMapper>(EPC, SAs, *PageSize);
190 #else
191 return make_error<StringError>(
192 "SharedMemoryMapper is not supported on this platform yet",
193 inconvertibleErrorCode());
194 #endif
195 }
196
reserve(size_t NumBytes,OnReservedFunction OnReserved)197 void SharedMemoryMapper::reserve(size_t NumBytes,
198 OnReservedFunction OnReserved) {
199 #if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
200
201 EPC.callSPSWrapperAsync<
202 rt::SPSExecutorSharedMemoryMapperServiceReserveSignature>(
203 SAs.Reserve,
204 [this, NumBytes, OnReserved = std::move(OnReserved)](
205 Error SerializationErr,
206 Expected<std::pair<ExecutorAddr, std::string>> Result) mutable {
207 if (SerializationErr) {
208 cantFail(Result.takeError());
209 return OnReserved(std::move(SerializationErr));
210 }
211
212 if (!Result)
213 return OnReserved(Result.takeError());
214
215 ExecutorAddr RemoteAddr;
216 std::string SharedMemoryName;
217 std::tie(RemoteAddr, SharedMemoryName) = std::move(*Result);
218
219 void *LocalAddr = nullptr;
220
221 #if defined(LLVM_ON_UNIX)
222
223 int SharedMemoryFile = shm_open(SharedMemoryName.c_str(), O_RDWR, 0700);
224 if (SharedMemoryFile < 0) {
225 return OnReserved(errorCodeToError(
226 std::error_code(errno, std::generic_category())));
227 }
228
229 // this prevents other processes from accessing it by name
230 shm_unlink(SharedMemoryName.c_str());
231
232 LocalAddr = mmap(nullptr, NumBytes, PROT_READ | PROT_WRITE, MAP_SHARED,
233 SharedMemoryFile, 0);
234 if (LocalAddr == MAP_FAILED) {
235 return OnReserved(errorCodeToError(
236 std::error_code(errno, std::generic_category())));
237 }
238
239 close(SharedMemoryFile);
240
241 #elif defined(_WIN32)
242
243 std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
244 SharedMemoryName.end());
245 HANDLE SharedMemoryFile = OpenFileMappingW(
246 FILE_MAP_ALL_ACCESS, FALSE, WideSharedMemoryName.c_str());
247 if (!SharedMemoryFile)
248 return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
249
250 LocalAddr =
251 MapViewOfFile(SharedMemoryFile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
252 if (!LocalAddr) {
253 CloseHandle(SharedMemoryFile);
254 return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
255 }
256
257 CloseHandle(SharedMemoryFile);
258
259 #endif
260 {
261 std::lock_guard<std::mutex> Lock(Mutex);
262 Reservations.insert({RemoteAddr, {LocalAddr, NumBytes}});
263 }
264
265 OnReserved(ExecutorAddrRange(RemoteAddr, NumBytes));
266 },
267 SAs.Instance, static_cast<uint64_t>(NumBytes));
268
269 #else
270 OnReserved(make_error<StringError>(
271 "SharedMemoryMapper is not supported on this platform yet",
272 inconvertibleErrorCode()));
273 #endif
274 }
275
prepare(ExecutorAddr Addr,size_t ContentSize)276 char *SharedMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
277 auto R = Reservations.upper_bound(Addr);
278 assert(R != Reservations.begin() && "Attempt to prepare unknown range");
279 R--;
280
281 ExecutorAddrDiff Offset = Addr - R->first;
282
283 return static_cast<char *>(R->second.LocalAddr) + Offset;
284 }
285
initialize(MemoryMapper::AllocInfo & AI,OnInitializedFunction OnInitialized)286 void SharedMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
287 OnInitializedFunction OnInitialized) {
288 auto Reservation = Reservations.find(AI.MappingBase);
289 assert(Reservation != Reservations.end() &&
290 "Attempt to initialize unreserved range");
291
292 tpctypes::SharedMemoryFinalizeRequest FR;
293
294 AI.Actions.swap(FR.Actions);
295
296 FR.Segments.reserve(AI.Segments.size());
297
298 for (auto Segment : AI.Segments) {
299 char *Base =
300 static_cast<char *>(Reservation->second.LocalAddr) + Segment.Offset;
301 std::memset(Base + Segment.ContentSize, 0, Segment.ZeroFillSize);
302
303 tpctypes::SharedMemorySegFinalizeRequest SegReq;
304 SegReq.Prot = tpctypes::toWireProtectionFlags(
305 static_cast<sys::Memory::ProtectionFlags>(Segment.Prot));
306 SegReq.Addr = AI.MappingBase + Segment.Offset;
307 SegReq.Size = Segment.ContentSize + Segment.ZeroFillSize;
308
309 FR.Segments.push_back(SegReq);
310 }
311
312 EPC.callSPSWrapperAsync<
313 rt::SPSExecutorSharedMemoryMapperServiceInitializeSignature>(
314 SAs.Initialize,
315 [OnInitialized = std::move(OnInitialized)](
316 Error SerializationErr, Expected<ExecutorAddr> Result) mutable {
317 if (SerializationErr) {
318 cantFail(Result.takeError());
319 return OnInitialized(std::move(SerializationErr));
320 }
321
322 OnInitialized(std::move(Result));
323 },
324 SAs.Instance, AI.MappingBase, std::move(FR));
325 }
326
deinitialize(ArrayRef<ExecutorAddr> Allocations,MemoryMapper::OnDeinitializedFunction OnDeinitialized)327 void SharedMemoryMapper::deinitialize(
328 ArrayRef<ExecutorAddr> Allocations,
329 MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
330 EPC.callSPSWrapperAsync<
331 rt::SPSExecutorSharedMemoryMapperServiceDeinitializeSignature>(
332 SAs.Deinitialize,
333 [OnDeinitialized = std::move(OnDeinitialized)](Error SerializationErr,
334 Error Result) mutable {
335 if (SerializationErr) {
336 cantFail(std::move(Result));
337 return OnDeinitialized(std::move(SerializationErr));
338 }
339
340 OnDeinitialized(std::move(Result));
341 },
342 SAs.Instance, Allocations);
343 }
344
release(ArrayRef<ExecutorAddr> Bases,OnReleasedFunction OnReleased)345 void SharedMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
346 OnReleasedFunction OnReleased) {
347 #if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
348 Error Err = Error::success();
349
350 {
351 std::lock_guard<std::mutex> Lock(Mutex);
352
353 for (auto Base : Bases) {
354
355 #if defined(LLVM_ON_UNIX)
356
357 if (munmap(Reservations[Base].LocalAddr, Reservations[Base].Size) != 0)
358 Err = joinErrors(std::move(Err), errorCodeToError(std::error_code(
359 errno, std::generic_category())));
360
361 #elif defined(_WIN32)
362
363 if (!UnmapViewOfFile(Reservations[Base].LocalAddr))
364 Err = joinErrors(std::move(Err),
365 errorCodeToError(mapWindowsError(GetLastError())));
366
367 #endif
368
369 Reservations.erase(Base);
370 }
371 }
372
373 EPC.callSPSWrapperAsync<
374 rt::SPSExecutorSharedMemoryMapperServiceReleaseSignature>(
375 SAs.Release,
376 [OnReleased = std::move(OnReleased),
377 Err = std::move(Err)](Error SerializationErr, Error Result) mutable {
378 if (SerializationErr) {
379 cantFail(std::move(Result));
380 return OnReleased(
381 joinErrors(std::move(Err), std::move(SerializationErr)));
382 }
383
384 return OnReleased(joinErrors(std::move(Err), std::move(Result)));
385 },
386 SAs.Instance, Bases);
387 #else
388 OnReleased(make_error<StringError>(
389 "SharedMemoryMapper is not supported on this platform yet",
390 inconvertibleErrorCode()));
391 #endif
392 }
393
~SharedMemoryMapper()394 SharedMemoryMapper::~SharedMemoryMapper() {
395 std::vector<ExecutorAddr> ReservationAddrs;
396 if (!Reservations.empty()) {
397 std::lock_guard<std::mutex> Lock(Mutex);
398 {
399 ReservationAddrs.reserve(Reservations.size());
400 for (const auto &R : Reservations) {
401 ReservationAddrs.push_back(R.first);
402 }
403 }
404 }
405
406 std::promise<MSVCPError> P;
407 auto F = P.get_future();
408 release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
409 // FIXME: Release can actually fail. The error should be propagated.
410 // Meanwhile, a better option is to explicitly call release().
411 cantFail(F.get());
412 }
413
} // namespace orc

} // namespace llvm