// The MIT License (MIT)
//
// Copyright (c) 2015 Sergey Makeev, Vadim Slyusarev
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#pragma once

#ifndef __MT_THREAD__
#define __MT_THREAD__

#include <MTConfig.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <errno.h>
#include <limits.h>
#include <stdlib.h>

#if MT_PLATFORM_OSX
#include <thread>
#endif

#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_STACK
#define MAP_STACK (0)
#endif

#include <Platform/Common/MTThread.h>
#include <MTAppInterop.h>

namespace MT
{
	class _Fiber;

	class Thread : public ThreadBase
	{
		pthread_t thread;
		pthread_attr_t threadAttr;

		Memory::StackDesc stackDesc;

		size_t stackSize;

		bool isStarted;

		// pthread entry point: unpacks the Thread instance and forwards to the user callback
		static void* ThreadFuncInternal(void* pThread)
		{
			Thread* self = (Thread*)pThread;
			self->func(self->funcData);
			return nullptr;
		}

#if MT_PLATFORM_OSX
		//TODO: support OSX priority and bind to processors
#else
		// Builds an affinity mask for a single core, or for all hardware threads when cpuCore == MT_CPUCORE_ANY
		static void GetAffinityMask(cpu_set_t & cpu_mask, uint32 cpuCore)
		{
			CPU_ZERO(&cpu_mask);

			if (cpuCore == MT_CPUCORE_ANY)
			{
				uint32 threadsCount = (uint32)GetNumberOfHardwareThreads();
				for(uint32 i = 0; i < threadsCount; i++)
				{
					CPU_SET(i, &cpu_mask);
				}
			} else
			{
				CPU_SET(cpuCore, &cpu_mask);
			}
		}

		// Maps ThreadPriority to the SCHED_FIFO priority range reported by the OS
		static int GetPriority(ThreadPriority::Type priority)
		{
			int min_prio = sched_get_priority_min(SCHED_FIFO);
			int max_prio = sched_get_priority_max(SCHED_FIFO);
			int default_prio = (max_prio - min_prio) / 2;

			switch(priority)
			{
			case ThreadPriority::DEFAULT:
				return default_prio;
			case ThreadPriority::HIGH:
				return max_prio;
			case ThreadPriority::LOW:
				return min_prio;
			default:
				MT_REPORT_ASSERT("Invalid thread priority");
			}

			return default_prio;
		}
#endif


	public:

		Thread()
			: stackSize(0)
			, isStarted(false)
		{
		}

		void* GetStackBottom()
		{
			return stackDesc.stackBottom;
		}

		size_t GetStackSize()
		{
			return stackSize;
		}

		// Allocates the stack, configures pthread attributes (stack, affinity, priority) and launches the thread
		void Start(size_t _stackSize, TThreadEntryPoint entryPoint, void* userData, uint32 cpuCore = MT_CPUCORE_ANY, ThreadPriority::Type priority = ThreadPriority::DEFAULT)
		{
			MT_ASSERT(!isStarted, "Thread already started");
			MT_ASSERT(func == nullptr, "Thread already started");

			func = entryPoint;
			funcData = userData;

			stackDesc = Memory::AllocStack(_stackSize);
			stackSize = stackDesc.GetStackSize();

			MT_ASSERT(stackSize >= PTHREAD_STACK_MIN, "Thread stack is too small");

			int err = pthread_attr_init(&threadAttr);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_init - error");

			err = pthread_attr_setstack(&threadAttr, stackDesc.stackBottom, stackSize);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setstack - error");

			err = pthread_attr_setdetachstate(&threadAttr, PTHREAD_CREATE_JOINABLE);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setdetachstate - error");

#if MT_PLATFORM_OSX
			MT_UNUSED(cpuCore);
			MT_UNUSED(priority);

			//TODO: support OSX priority and bind to processors
#else
			err = pthread_attr_setinheritsched(&threadAttr, PTHREAD_EXPLICIT_SCHED);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setinheritsched - error");

			cpu_set_t cpu_mask;
			GetAffinityMask(cpu_mask, cpuCore);
			err = pthread_attr_setaffinity_np(&threadAttr, sizeof(cpu_mask), &cpu_mask);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setaffinity_np - error");

			struct sched_param params;
			params.sched_priority = GetPriority(priority);
			err = pthread_attr_setschedparam(&threadAttr, &params);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setschedparam - error");
#endif

			isStarted = true;

			err = pthread_create(&thread, &threadAttr, ThreadFuncInternal, this);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_create - error");
		}

		// Waits for the thread to finish, destroys the pthread attributes and releases the stack
		void Join()
		{
			MT_ASSERT(isStarted, "Thread is not started");

			if (func == nullptr)
			{
				return;
			}

			void *threadStatus = nullptr;
			int err = pthread_join(thread, &threadStatus);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_join - error");

			err = pthread_attr_destroy(&threadAttr);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_destroy - error");

			func = nullptr;
			funcData = nullptr;

			if (stackDesc.stackMemory != nullptr)
			{
				Memory::FreeStack(stackDesc);
			}

			stackSize = 0;
			isStarted = false;
		}

		bool IsCurrentThread() const
		{
			if (!isStarted)
			{
				return false;
			}

			pthread_t callThread = pthread_self();
			if (pthread_equal(callThread, thread))
			{
				return true;
			}
			return false;
		}

		static int GetNumberOfHardwareThreads()
		{
#if MT_PLATFORM_OSX
			return (int)std::thread::hardware_concurrency();
#else
			long numberOfProcessors = sysconf( _SC_NPROCESSORS_ONLN );
			return (int)numberOfProcessors;
#endif
		}

#ifdef MT_INSTRUMENTED_BUILD
		static void SetThreadName(const char* threadName)
		{
#if MT_PLATFORM_OSX
			// OSX variant of pthread_setname_np takes no thread argument and names the calling thread
			pthread_setname_np(threadName);
#else
			pthread_t callThread = pthread_self();
			pthread_setname_np(callThread, threadName);
#endif
		}
#endif

		// Applies affinity and scheduling priority to the calling thread
		static void SetThreadSchedulingPolicy(uint32 cpuCore, ThreadPriority::Type priority = ThreadPriority::DEFAULT)
		{
#if MT_PLATFORM_OSX
			MT_UNUSED(cpuCore);
			MT_UNUSED(priority);

			//TODO: support OSX priority and bind to processors
#else
			pthread_t callThread = pthread_self();

			int sched_priority = GetPriority(priority);
			int err = pthread_setschedprio(callThread, sched_priority);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_setschedprio - error");

			cpu_set_t cpu_mask;
			GetAffinityMask(cpu_mask, cpuCore);
			err = pthread_setaffinity_np(callThread, sizeof(cpu_mask), &cpu_mask);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_setaffinity_np - error");
#endif
		}


		// Sleeps for the requested number of milliseconds, resuming the sleep if interrupted by a signal
		static void Sleep(uint32 milliseconds)
		{
			struct timespec req;
			time_t sec = (time_t)(milliseconds / 1000);
			milliseconds = milliseconds - (uint32)(sec * 1000);
			req.tv_sec = sec;
			req.tv_nsec = milliseconds * 1000000L;
			while (nanosleep(&req, &req) == -1 && errno == EINTR)
			{
				continue;
			}
		}

	};


}


#endif
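// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original header): shows how
// MT::Thread is typically driven from caller code. It assumes TThreadEntryPoint
// is a void(void*) callback, which matches how ThreadFuncInternal invokes it
// above; the entry point, stack size and RunWorkerOnce helper below are
// hypothetical examples, while MT_CPUCORE_ANY and MT::ThreadPriority come from
// the headers included by this file. Kept inside #if 0 so it is never compiled.
// ---------------------------------------------------------------------------
#if 0
static void ExampleWorkerEntryPoint(void* userData)
{
	// The user payload passed to Start() arrives here unchanged.
	int* counter = static_cast<int*>(userData);
	*counter += 1;
}

static void RunWorkerOnce()
{
	int counter = 0;

	MT::Thread worker;
	// 128 KiB is an arbitrary example stack size; run on any core at default priority.
	worker.Start(128 * 1024, ExampleWorkerEntryPoint, &counter, MT_CPUCORE_ANY, MT::ThreadPriority::DEFAULT);
	worker.Join();
}
#endif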