/*
    Copyright (c) 2005-2022 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef _itt_shared_malloc_MapMemory_H
#define _itt_shared_malloc_MapMemory_H

#include <stdlib.h>

#if __unix__ || __APPLE__ || __sun || __FreeBSD__

#if __sun && !defined(_XPG4_2)
 // To have void* as mmap's 1st argument
 #define _XPG4_2 1
 #define XPG4_WAS_DEFINED 1
#endif

#include <sys/mman.h>
#if __unix__
/* __TBB_MAP_HUGETLB is MAP_HUGETLB from the system header linux/mman.h.
   That header is not included here because on some Linux flavors its
   inclusion leads to a compilation error, while a change of the
   MAP_HUGETLB value is highly unlikely.
*/
#define __TBB_MAP_HUGETLB 0x40000
#else
#define __TBB_MAP_HUGETLB 0
#endif

#if XPG4_WAS_DEFINED
 #undef _XPG4_2
 #undef XPG4_WAS_DEFINED
#endif

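// Thin wrapper over mmap: an anonymous, private, read/write mapping.
// map_hint and map_flags let callers request a placement hint
// (see mmapTHP below) or extra flags such as __TBB_MAP_HUGETLB.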
inline void* mmap_impl(size_t map_size, void* map_hint = nullptr, int map_flags = 0) {
#ifndef MAP_ANONYMOUS
// macOS* defines MAP_ANON, which is deprecated in Linux*.
#define MAP_ANONYMOUS MAP_ANON
#endif /* MAP_ANONYMOUS */
    return mmap(map_hint, map_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | map_flags, -1, 0);
}

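/* Map memory suitable for transparent huge pages (THP).
   Strategy: optimistically mmap at a hint derived from the previous
   huge-page-aligned allocation; if the kernel returns an unaligned
   region, remap an oversized region (bytes + HUGE_PAGE_SIZE) and trim
   the unaligned head and the remaining tail with munmap, so that the
   surviving [result, result + bytes) range is huge-page aligned.
   For example, with a 2 MB huge page, a region starting at
   0x7f0000001000 has its first 0x1ff000 bytes trimmed, leaving the
   mapping to start at 0x7f0000200000. */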
inline void* mmapTHP(size_t bytes) {
    // Static pointer: starts out zero-initialized in the BSS section
    static void* hint;

    // Optimistically use the end of the last huge-page-aligned region
    // as a hint for mmap.
    hint = hint ? (void*)((uintptr_t)hint - bytes) : hint;
    void* result = mmap_impl(bytes, hint);

    // Something went wrong
    if (result == MAP_FAILED) {
        hint = nullptr;
        return MAP_FAILED;
    }

    // If the optimistic mapping is not aligned, fall back to the slow
    // path: map an oversized region and trim the excess parts.
    if (!isAligned(result, HUGE_PAGE_SIZE)) {
        // Undo the previous try
        munmap(result, bytes);

        // Map a region oversized by one huge page
        result = mmap_impl(bytes + HUGE_PAGE_SIZE);

        // Something went wrong
        if (result == MAP_FAILED) {
            hint = nullptr;
            return MAP_FAILED;
        }

        // Misalignment offset
        uintptr_t offset = 0;

        if (!isAligned(result, HUGE_PAGE_SIZE)) {
            // Trim the excess head of the region if it is not aligned
            offset = HUGE_PAGE_SIZE - ((uintptr_t)result & (HUGE_PAGE_SIZE - 1));
            munmap(result, offset);

            // New region beginning
            result = (void*)((uintptr_t)result + offset);
        }

        // Trim the excess tail of the region
        munmap((void*)((uintptr_t)result + bytes), HUGE_PAGE_SIZE - offset);
    }

    // Assume that mmap'ed virtual addresses grow down by default,
    // so set the hint to the result of the last successful allocation
    // and then use it minus the requested size as the next mapping point.
    // TODO: an atomic store (no fence needed) is meant here, but
    // currently we don't have such a function.
    hint = result;

    MALLOC_ASSERT(isAligned(result, HUGE_PAGE_SIZE), "Mapped address is not aligned on huge page size.");

    return result;
}

#define MEMORY_MAPPING_USES_MALLOC 0
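// Map bytes of memory of the given page type. Returns nullptr on
// failure and restores errno so that a failed mapping attempt is
// transparent to the caller. Typical use (sizes are illustrative):
//     void* p = MapMemory(2 * HUGE_PAGE_SIZE, PREALLOCATED_HUGE_PAGE);
//     if (p) { /* ... */ UnmapMemory(p, 2 * HUGE_PAGE_SIZE); }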
void* MapMemory (size_t bytes, PageType pageType)
{
    void* result = nullptr;
    int prevErrno = errno;

    switch (pageType) {
        case REGULAR:
        {
            result = mmap_impl(bytes);
            break;
        }
        case PREALLOCATED_HUGE_PAGE:
        {
            MALLOC_ASSERT((bytes % HUGE_PAGE_SIZE) == 0, "Mapping size should be divisible by huge page size");
            result = mmap_impl(bytes, nullptr, __TBB_MAP_HUGETLB);
            break;
        }
        case TRANSPARENT_HUGE_PAGE:
        {
            MALLOC_ASSERT((bytes % HUGE_PAGE_SIZE) == 0, "Mapping size should be divisible by huge page size");
            result = mmapTHP(bytes);
            break;
        }
        default:
        {
            MALLOC_ASSERT(false, "Unknown page type");
        }
    }

    if (result == MAP_FAILED) {
        errno = prevErrno;
        return nullptr;
    }

    return result;
}

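// Unmap a region previously obtained from MapMemory. On failure,
// restores errno but still returns munmap's -1 result.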
int UnmapMemory(void *area, size_t bytes)
{
    int prevErrno = errno;
    int ret = munmap(area, bytes);
    if (-1 == ret)
        errno = prevErrno;
    return ret;
}

#elif (_WIN32 || _WIN64) && !__TBB_WIN8UI_SUPPORT
#include <windows.h>

#define MEMORY_MAPPING_USES_MALLOC 0
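// On Windows*, reserve and commit the pages in a single VirtualAlloc
// call; the PageType argument is ignored since large pages are not
// requested on this path.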
void* MapMemory (size_t bytes, PageType)
{
    /* Is VirtualAlloc thread safe? */
    return VirtualAlloc(nullptr, bytes, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
}

int UnmapMemory(void *area, size_t /*bytes*/)
{
    BOOL result = VirtualFree(area, 0, MEM_RELEASE);
    return !result;
}

#else

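// Fallback for platforms without mmap or VirtualAlloc: plain
// malloc/free. errno is preserved on failure to match the behavior
// of the other branches.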
void *ErrnoPreservingMalloc(size_t bytes)
{
    int prevErrno = errno;
    void *ret = malloc( bytes );
    if (!ret)
        errno = prevErrno;
    return ret;
}

#define MEMORY_MAPPING_USES_MALLOC 1
void* MapMemory (size_t bytes, PageType)
{
    return ErrnoPreservingMalloc( bytes );
}

int UnmapMemory(void *area, size_t /*bytes*/)
{
    free( area );
    return 0;
}

#endif /* OS dependent */

#if MALLOC_CHECK_RECURSION && MEMORY_MAPPING_USES_MALLOC
#error Impossible to protect against malloc recursion when memory mapping uses malloc.
#endif

#endif /* _itt_shared_malloc_MapMemory_H */