// The MIT License (MIT)
//
// 	Copyright (c) 2015 Sergey Makeev, Vadim Slyusarev
//
// 	Permission is hereby granted, free of charge, to any person obtaining a copy
// 	of this software and associated documentation files (the "Software"), to deal
// 	in the Software without restriction, including without limitation the rights
// 	to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// 	copies of the Software, and to permit persons to whom the Software is
// 	furnished to do so, subject to the following conditions:
//
//  The above copyright notice and this permission notice shall be included in
// 	all copies or substantial portions of the Software.
//
// 	THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// 	IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// 	FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// 	AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// 	LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// 	OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// 	THE SOFTWARE.

#pragma once

#ifndef __MT_THREAD__
#define __MT_THREAD__

#include <MTConfig.h>
#include <pthread.h>
#include <unistd.h>
#include <time.h>
#include <limits.h>
#include <stdlib.h>
#include <sched.h>

#if MT_PLATFORM_OSX
#include <thread>
#endif

#include <sys/mman.h>

#ifndef MAP_ANONYMOUS
    #define MAP_ANONYMOUS MAP_ANON
#endif

#ifndef MAP_STACK
    #define MAP_STACK (0)
#endif

#include <Platform/Common/MTThread.h>
#include <MTAppInterop.h>
namespace MT
{
	//
	// Signals the calling thread to yield execution to another thread that is ready to run.
	//
	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
	inline void YieldThread()
	{
		int err = sched_yield();
		MT_USED_IN_ASSERT(err);
		MT_ASSERT(err == 0, "sched_yield - error");
	}


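	//
	// Lightweight wrapper around pthread_t that also tracks whether the id has been assigned,
	// so a default-constructed ThreadId can be told apart from a real thread id.
	//
	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////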
	class ThreadId
	{
	protected:
		pthread_t id;
		Atomic32<uint32> isInitialized;

		void Assign(const ThreadId& other)
		{
			id = other.id;
			isInitialized.Store(other.isInitialized.Load());
		}

	public:

		ThreadId()
		{
			isInitialized.Store(0);
		}

		mt_forceinline ThreadId(const ThreadId& other)
		{
			Assign(other);
		}

		mt_forceinline ThreadId& operator=(const ThreadId& other)
		{
			Assign(other);
			return *this;
		}

		mt_forceinline static ThreadId Self()
		{
			ThreadId selfThread;
			selfThread.id = pthread_self();
			selfThread.isInitialized.Store(1);
			return selfThread;
		}

		mt_forceinline bool IsValid() const
		{
			return (isInitialized.Load() != 0);
		}

		mt_forceinline bool IsEqual(const ThreadId& other) const
		{
			if (isInitialized.Load() != other.isInitialized.Load())
			{
				return false;
			}
			// pthread_equal returns non-zero when the two ids refer to the same thread
			if (pthread_equal(id, other.id) == 0)
			{
				return false;
			}
			return true;
		}

		mt_forceinline uint64 AsUInt64() const
		{
			if (isInitialized.Load() == 0)
			{
				return (uint64)-1;
			}

			return (uint64)id;
		}
	};


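	//
	// Thread: a joinable worker thread built on top of pthreads. The stack is allocated
	// explicitly through Memory::AllocStack and handed to pthread_attr_setstack, so
	// GetStackBottom()/GetStackSize() describe the exact memory the thread runs on.
	//
	// Minimal usage sketch (assuming TThreadEntryPoint, declared in
	// Platform/Common/MTThread.h, is a plain void(void*) entry point):
	//
	//   void WorkerEntry(void* userData) { /* ... */ }
	//
	//   MT::Thread worker;
	//   worker.Start(128 * 1024, WorkerEntry, nullptr); // 128 KB stack
	//   // ... do other work ...
	//   worker.Join();                                  // waits and releases the stack
	//
	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////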
	class Thread : public ThreadBase
	{
		pthread_t thread;
		pthread_attr_t threadAttr;

		Memory::StackDesc stackDesc;

		size_t stackSize;

		bool isStarted;

		static void* ThreadFuncInternal(void* pThread)
		{
			Thread* self = (Thread *)pThread;
			self->func(self->funcData);
			return nullptr;
		}

#if MT_PLATFORM_OSX
		//TODO: support OSX priority and bind to processors
#else
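		// Builds a CPU affinity mask: every hardware thread when cpuCore is MT_CPUCORE_ANY,
		// otherwise only the requested core.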
		static void GetAffinityMask(cpu_set_t & cpu_mask, uint32 cpuCore)
		{
			CPU_ZERO(&cpu_mask);

			if (cpuCore == MT_CPUCORE_ANY)
			{
				uint32 threadsCount = (uint32)GetNumberOfHardwareThreads();
				for(uint32 i = 0; i < threadsCount; i++)
				{
					CPU_SET(i, &cpu_mask);
				}
			} else
			{
				CPU_SET(cpuCore, &cpu_mask);
			}
		}


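		// Maps a ThreadPriority value onto the SCHED_FIFO priority range reported by
		// sched_get_priority_min/max.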
		static int GetPriority(ThreadPriority::Type priority)
		{
			int min_prio = sched_get_priority_min (SCHED_FIFO);
			int max_prio = sched_get_priority_max (SCHED_FIFO);
			int default_prio = (max_prio - min_prio) / 2;

			switch(priority)
			{
			case ThreadPriority::DEFAULT:
				return default_prio;
			case ThreadPriority::HIGH:
				return max_prio;
			case ThreadPriority::LOW:
				return min_prio;
			default:
				MT_REPORT_ASSERT("Invalid thread priority");
			}

			return default_prio;
		}
#endif


	public:

		Thread()
			: stackSize(0)
			, isStarted(false)
		{
		}

		void* GetStackBottom()
		{
			return stackDesc.stackBottom;
		}

		size_t GetStackSize()
		{
			return stackSize;
		}


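		// Allocates the thread stack, configures the pthread attributes (explicit stack,
		// joinable, and, on non-OSX platforms, CPU affinity and scheduling priority),
		// then creates the thread.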
		void Start(size_t _stackSize, TThreadEntryPoint entryPoint, void* userData, uint32 cpuCore = MT_CPUCORE_ANY, ThreadPriority::Type priority = ThreadPriority::DEFAULT)
		{
			MT_ASSERT(!isStarted, "Thread already started");

			MT_ASSERT(func == nullptr, "Thread already started");

			func = entryPoint;
			funcData = userData;

			stackDesc = Memory::AllocStack(_stackSize);
			stackSize = stackDesc.GetStackSize();

			MT_ASSERT(stackSize >= PTHREAD_STACK_MIN, "Thread stack is too small");

			int err = pthread_attr_init(&threadAttr);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_init - error");

			err = pthread_attr_setstack(&threadAttr, stackDesc.stackBottom, stackSize);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setstack - error");

			err = pthread_attr_setdetachstate(&threadAttr, PTHREAD_CREATE_JOINABLE);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setdetachstate - error");

#if MT_PLATFORM_OSX
			MT_UNUSED(cpuCore);
			MT_UNUSED(priority);

			//TODO: support OSX priority and bind to processors
#else
			err = pthread_attr_setinheritsched(&threadAttr, PTHREAD_EXPLICIT_SCHED);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setinheritsched - error");

			cpu_set_t cpu_mask;
			GetAffinityMask(cpu_mask, cpuCore);
			err = pthread_attr_setaffinity_np(&threadAttr, sizeof(cpu_mask), &cpu_mask);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setaffinity_np - error");

			struct sched_param params;
			params.sched_priority = GetPriority(priority);
			err = pthread_attr_setschedparam(&threadAttr, &params);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_setschedparam - error");
#endif

			isStarted = true;

			err = pthread_create(&thread, &threadAttr, ThreadFuncInternal, this);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_create - error");
		}

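		// Waits for the thread to finish, destroys the pthread attributes and releases
		// the stack allocated by Start().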
		void Join()
		{
			MT_ASSERT(isStarted, "Thread is not started");

			if (func == nullptr)
			{
				return;
			}

			void *threadStatus = nullptr;
			int err = pthread_join(thread, &threadStatus);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_join - error");

			err = pthread_attr_destroy(&threadAttr);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_attr_destroy - error");

			func = nullptr;
			funcData = nullptr;

			if (stackDesc.stackMemory != nullptr)
			{
				Memory::FreeStack(stackDesc);
			}

			stackSize = 0;
			isStarted = false;
		}


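		// Number of hardware threads reported by the operating system.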
		static int GetNumberOfHardwareThreads()
		{
#if MT_PLATFORM_OSX
			return std::thread::hardware_concurrency();
#else
			long numberOfProcessors = sysconf( _SC_NPROCESSORS_ONLN );
			return (int)numberOfProcessors;
#endif
		}

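		// Instrumented builds only: names the calling thread so it shows up readably
		// in debuggers and profilers.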
#ifdef MT_INSTRUMENTED_BUILD
		static void SetThreadName(const char* threadName)
		{
#if MT_PLATFORM_OSX
			// On OSX, pthread_setname_np names the calling thread and takes only the name
			pthread_setname_np(threadName);
#else
			pthread_t callThread = pthread_self();
			pthread_setname_np(callThread, threadName);
#endif
		}
#endif

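		// Applies the given priority and CPU affinity to the calling thread
		// (currently a no-op on OSX, see the TODO below).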
		static void SetThreadSchedulingPolicy(uint32 cpuCore, ThreadPriority::Type priority = ThreadPriority::DEFAULT)
		{
#if MT_PLATFORM_OSX
			MT_UNUSED(cpuCore);
			MT_UNUSED(priority);

			//TODO: support OSX priority and bind to processors
#else
			pthread_t callThread = pthread_self();

			int sched_priority = GetPriority(priority);
			int err = pthread_setschedprio(callThread, sched_priority);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_setschedprio - error");

			cpu_set_t cpu_mask;
			GetAffinityMask(cpu_mask, cpuCore);
			err = pthread_setaffinity_np(callThread, sizeof(cpu_mask), &cpu_mask);
			MT_USED_IN_ASSERT(err);
			MT_ASSERT(err == 0, "pthread_setaffinity_np - error");
#endif
		}


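		// Sleeps for the given number of milliseconds, restarting nanosleep if it is
		// interrupted by a signal (the remaining time is written back into req).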
		static void Sleep(uint32 milliseconds)
		{
			struct timespec req;
			int sec = (int)(milliseconds / 1000);
			milliseconds = milliseconds - (sec*1000);
			req.tv_sec = sec;
			req.tv_nsec = milliseconds * 1000000L;
			while (nanosleep(&req,&req) == -1 )
			{
				continue;
			}
		}

	};


}


#endif