/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/
16
17 #ifndef _itt_shared_malloc_MapMemory_H
18 #define _itt_shared_malloc_MapMemory_H
19
20 #include <stdlib.h>
21
22 #if __unix__ || __APPLE__ || __sun || __FreeBSD__
23
24 #if __sun && !defined(_XPG4_2)
25 // To have void* as mmap's 1st argument
26 #define _XPG4_2 1
27 #define XPG4_WAS_DEFINED 1
28 #endif
29
30 #include <sys/mman.h>
31 #if __unix__
32 /* __TBB_MAP_HUGETLB is MAP_HUGETLB from system header linux/mman.h.
33 The header is not included here, as on some Linux flavors inclusion of
34 linux/mman.h leads to compilation error,
35 while changing of MAP_HUGETLB is highly unexpected.
36 */
37 #define __TBB_MAP_HUGETLB 0x40000
38 #else
39 #define __TBB_MAP_HUGETLB 0
40 #endif
41
42 #if XPG4_WAS_DEFINED
43 #undef _XPG4_2
44 #undef XPG4_WAS_DEFINED
45 #endif
46
#ifndef MAP_ANONYMOUS
// macOS* defines MAP_ANON, which is deprecated in Linux*.
#define MAP_ANONYMOUS MAP_ANON
#endif /* MAP_ANONYMOUS */

// Create an anonymous, private, read/write mapping of map_size bytes.
// map_hint is forwarded to mmap as the preferred address (may be null);
// map_flags is OR-ed into the base flags (e.g. __TBB_MAP_HUGETLB).
// Returns the mapped address, or MAP_FAILED on error.
inline void* mmap_impl(size_t map_size, void* map_hint = nullptr, int map_flags = 0) {
    const int protection = PROT_READ | PROT_WRITE;
    const int flags = MAP_PRIVATE | MAP_ANONYMOUS | map_flags;
    return mmap(map_hint, map_size, protection, flags, /*fd*/ -1, /*offset*/ 0);
}
54
// Map `bytes` of memory aligned to HUGE_PAGE_SIZE, suitable for transparent
// huge pages. Callers pass sizes that are multiples of HUGE_PAGE_SIZE (see
// the MALLOC_ASSERT in MapMemory). Returns an aligned address or MAP_FAILED.
//
// Fast path: reuse a hint derived from the previous successful mapping.
// Slow path: over-allocate by one huge page and munmap the misaligned
// head/tail so the surviving region is exactly `bytes` and aligned.
//
// NOTE(review): `hint` is a plain static updated with ordinary loads/stores;
// the TODO below acknowledges an atomic store is intended — not thread-safe
// as written.
inline void* mmapTHP(size_t bytes) {
    // Initializes in zero-initialized data section
    static void* hint;

    // Optimistically try to use a last huge page aligned region end
    // as a hint for mmap.
    hint = hint ? (void*)((uintptr_t)hint - bytes) : hint;
    void* result = mmap_impl(bytes, hint);

    // Something went wrong
    if (result == MAP_FAILED) {
        // Reset the hint so the next attempt starts from scratch.
        hint = nullptr;
        return MAP_FAILED;
    }

    // Otherwise, fall back to the slow path - map oversized region
    // and trim excess parts.
    if (!isAligned(result, HUGE_PAGE_SIZE)) {
        // Undo previous try
        munmap(result, bytes);

        // Map oversized on huge page size region
        result = mmap_impl(bytes + HUGE_PAGE_SIZE);

        // Something went wrong
        if (result == MAP_FAILED) {
            hint = nullptr;
            return MAP_FAILED;
        }

        // Misalignment offset
        uintptr_t offset = 0;

        if (!isAligned(result, HUGE_PAGE_SIZE)) {
            // Trim excess head of a region if it is no aligned
            offset = HUGE_PAGE_SIZE - ((uintptr_t)result & (HUGE_PAGE_SIZE - 1));
            munmap(result, offset);

            // New region beginning
            result = (void*)((uintptr_t)result + offset);
        }

        // Trim excess tail of a region
        // (head trim of `offset` bytes + tail trim of the remainder add up to
        // exactly the extra HUGE_PAGE_SIZE that was over-mapped above).
        munmap((void*)((uintptr_t)result + bytes), HUGE_PAGE_SIZE - offset);
    }

    // Assume, that mmap virtual addresses grow down by default
    // So, set a hint as a result of a last successful allocation
    // and then use it minus requested size as a new mapping point.
    // TODO: Atomic store is meant here, fence not needed, but
    // currently we don't have such function.
    hint = result;

    MALLOC_ASSERT(isAligned(result, HUGE_PAGE_SIZE), "Mapped address is not aligned on huge page size.");

    return result;
}
112
113 #define MEMORY_MAPPING_USES_MALLOC 0
MapMemory(size_t bytes,PageType pageType)114 void* MapMemory (size_t bytes, PageType pageType)
115 {
116 void* result = nullptr;
117 int prevErrno = errno;
118
119 switch (pageType) {
120 case REGULAR:
121 {
122 result = mmap_impl(bytes);
123 break;
124 }
125 case PREALLOCATED_HUGE_PAGE:
126 {
127 MALLOC_ASSERT((bytes % HUGE_PAGE_SIZE) == 0, "Mapping size should be divisible by huge page size");
128 result = mmap_impl(bytes, nullptr, __TBB_MAP_HUGETLB);
129 break;
130 }
131 case TRANSPARENT_HUGE_PAGE:
132 {
133 MALLOC_ASSERT((bytes % HUGE_PAGE_SIZE) == 0, "Mapping size should be divisible by huge page size");
134 result = mmapTHP(bytes);
135 break;
136 }
137 default:
138 {
139 MALLOC_ASSERT(false, "Unknown page type");
140 }
141 }
142
143 if (result == MAP_FAILED) {
144 errno = prevErrno;
145 return nullptr;
146 }
147
148 return result;
149 }
150
// Unmap a region previously returned by MapMemory.
// Returns munmap's result: 0 on success, -1 on failure. On failure errno
// is restored so the caller's errno is left untouched.
int UnmapMemory(void *area, size_t bytes)
{
    const int savedErrno = errno;
    const int status = munmap(area, bytes);
    if (status == -1)
        errno = savedErrno;
    return status;
}
159
160 #elif (_WIN32 || _WIN64) && !__TBB_WIN8UI_SUPPORT
161 #include <windows.h>
162
163 #define MEMORY_MAPPING_USES_MALLOC 0
MapMemory(size_t bytes,PageType)164 void* MapMemory (size_t bytes, PageType)
165 {
166 /* Is VirtualAlloc thread safe? */
167 return VirtualAlloc(nullptr, bytes, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
168 }
169
UnmapMemory(void * area,size_t)170 int UnmapMemory(void *area, size_t /*bytes*/)
171 {
172 BOOL result = VirtualFree(area, 0, MEM_RELEASE);
173 return !result;
174 }
175
176 #else
177
// malloc() wrapper that restores errno when the allocation fails, so a
// nullptr return never perturbs the caller's errno.
void *ErrnoPreservingMalloc(size_t bytes)
{
    const int savedErrno = errno;
    void *block = malloc(bytes);
    if (block == nullptr)
        errno = savedErrno;
    return block;
}
186
187 #define MEMORY_MAPPING_USES_MALLOC 1
MapMemory(size_t bytes,PageType)188 void* MapMemory (size_t bytes, PageType)
189 {
190 return ErrnoPreservingMalloc( bytes );
191 }
192
// Fallback path: release a heap block obtained from MapMemory.
// free() has no failure mode, so this always reports success (0).
int UnmapMemory(void *area, size_t /*bytes*/)
{
    free(area);
    return 0;
}
198
199 #endif /* OS dependent */
200
201 #if MALLOC_CHECK_RECURSION && MEMORY_MAPPING_USES_MALLOC
202 #error Impossible to protect against malloc recursion when memory mapping uses malloc.
203 #endif
204
205 #endif /* _itt_shared_malloc_MapMemory_H */
206