1 // The MIT License (MIT)
2 //
3 // 	Copyright (c) 2015 Sergey Makeev, Vadim Slyusarev
4 //
5 // 	Permission is hereby granted, free of charge, to any person obtaining a copy
6 // 	of this software and associated documentation files (the "Software"), to deal
7 // 	in the Software without restriction, including without limitation the rights
8 // 	to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 // 	copies of the Software, and to permit persons to whom the Software is
10 // 	furnished to do so, subject to the following conditions:
11 //
12 //  The above copyright notice and this permission notice shall be included in
13 // 	all copies or substantial portions of the Software.
14 //
15 // 	THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 // 	IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 // 	FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 // 	AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 // 	LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 // 	OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 // 	THE SOFTWARE.
22 
23 #pragma once
24 
25 #ifndef __MT_STACK__
26 #define __MT_STACK__
27 
28 #include <MTConfig.h>
29 #include <array>
30 #include <limits>
31 
32 namespace MT
33 {
34 	static const int32 invalidStackId = 0;
35 	static const int32 invalidStorageId = 0;
36 
37 
38 	//
39 	// Scope descriptor
40 	//
41 	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
42 	class ScopeDesc
43 	{
44 	protected:
45 
46 		//descriptor name
47 		const char* name;
48 
49 		//descriptor declaration file/line
50 		const char* file;
51 		int32 line;
52 
53 	public:
54 
ScopeDesc(const char * srcFile,int32 srcLine,const char * scopeName)55 		ScopeDesc(const char* srcFile, int32 srcLine, const char* scopeName)
56 			: name(scopeName)
57 			, file(srcFile)
58 			, line(srcLine)
59 		{
60 		}
61 
GetSourceFile()62 		const char* GetSourceFile() const
63 		{
64 			return file;
65 		}
66 
GetSourceLine()67 		int32 GetSourceLine() const
68 		{
69 			return line;
70 		}
71 
GetName()72 		const char* GetName() const
73 		{
74 			return name;
75 		}
76 
77 
78 	};
79 
80 
81 	//
82 	// Scope stack entry
83 	//
84 	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
85 	class ScopeStackEntry
86 	{
87 		int32 parentIndex;
88 		int32 descIndex;
89 
90 	public:
91 
ScopeStackEntry(int32 _parentIndex,int32 _descIndex)92 		ScopeStackEntry(int32 _parentIndex, int32 _descIndex)
93 			: parentIndex(_parentIndex)
94 			, descIndex(_descIndex)
95 		{
96 		}
97 
98 #if MT_DEBUG
~ScopeStackEntry()99 		~ScopeStackEntry()
100 		{
101 			parentIndex = std::numeric_limits<int32>::lowest();
102 			descIndex = std::numeric_limits<int32>::lowest();
103 		}
104 #endif
105 
GetParentId()106 		int32 GetParentId() const
107 		{
108 			return parentIndex;
109 		}
110 
GetDescriptionId()111 		int32 GetDescriptionId() const
112 		{
113 			return descIndex;
114 		}
115 
116 
117 	};
118 
119 
120 	//
121 	// Persistent scope descriptor storage
122 	//
123 	//   persistent storage used to store scope descriptors
124 	//   descriptors lifetime is equal to the storage lifetime
125 	//
126 	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
127 	template<typename T, uint32 capacity>
128 	class PersistentScopeDescriptorStorage
129 	{
130 		static const int32 ALIGNMENT = 16;
131 		static const int32 ALIGNMENT_MASK = (ALIGNMENT-1);
132 
133 		MT::Atomic32<int32> top;
134 		//additional bytes for alignment
135 		byte rawMemory_[ capacity * sizeof(T)  + ALIGNMENT];
136 
137 
IndexToObject(int32 index)138 		T* IndexToObject(int32 index)
139 		{
140 			byte* alignedMemory = (byte*)( ( (uintptr_t)&rawMemory_[0] + ALIGNMENT_MASK ) & ~(uintptr_t)ALIGNMENT_MASK );
141 			T* pObjectMemory = (T*)(alignedMemory + index * sizeof(T));
142 			return pObjectMemory;
143 		}
144 
145 
AllocObject(int32 & id)146 		T* AllocObject(int32 & id)
147 		{
148 			//new element index
149 			int32 index = top.IncFetch() - 1;
150 			MT_VERIFY(index < (int32)capacity, "Area allocator is full. Can't allocate more memory.", return nullptr);
151 			//get memory for object
152 			T* pObject = IndexToObject(index);
153 			id = (index + 1);
154 			return pObject;
155 		}
156 
157 
158 	public:
159 
PersistentScopeDescriptorStorage()160 		PersistentScopeDescriptorStorage()
161 		{
162 			static_assert(std::is_base_of<MT::ScopeDesc, T>::value, "Type must be derived from MT::ScopeDesc");
163 			top.Store(0);
164 		}
165 
~PersistentScopeDescriptorStorage()166 		~PersistentScopeDescriptorStorage()
167 		{
168 			int32 count = top.Exchange(0);
169 			for (int32 i = 0; i < count; i++)
170 			{
171 				T* pObject = IndexToObject(i);
172 				MT_UNUSED(pObject);
173 				pObject->~T();
174 			}
175 		}
176 
Alloc(const char * srcFile,int32 srcLine,const char * scopeName)177 		int32 Alloc(const char* srcFile, int32 srcLine, const char* scopeName)
178 		{
179 			int32 id;
180 			T* pObject = AllocObject(id);
181 			if (pObject == nullptr)
182 				return invalidStorageId;
183 
184 			//placement ctor
185 			new(pObject) T(srcFile, srcLine, scopeName);
186 
187 			return id;
188 		}
189 
190 		template<typename T1>
Alloc(const char * srcFile,int32 srcLine,const char * scopeName,const T1 & p1)191 		int32 Alloc(const char* srcFile, int32 srcLine, const char* scopeName, const T1 & p1)
192 		{
193 			int32 id;
194 			T* pObject = AllocObject(id);
195 			if (pObject == nullptr)
196 				return invalidStorageId;
197 
198 			//placement ctor
199 			new(pObject) T(srcFile, srcLine, scopeName, p1);
200 			return id;
201 		}
202 
203 		template<typename T1, typename T2>
Alloc(const char * srcFile,int32 srcLine,const char * scopeName,const T1 & p1,const T1 & p2)204 		int32 Alloc(const char* srcFile, int32 srcLine, const char* scopeName, const T1 & p1, const T1 & p2)
205 		{
206 			int32 id;
207 			T* pObject = AllocObject(id);
208 			if (pObject == nullptr)
209 				return invalidStorageId;
210 
211 			//placement ctor
212 			new(pObject) T(srcFile, srcLine, scopeName, p1, p2);
213 			return id;
214 		}
215 
216 		template<typename T1, typename T2>
Alloc(const char * srcFile,int32 srcLine,const char * scopeName,const T1 & p1,const T1 & p2,const T1 & p3)217 		int32 Alloc(const char* srcFile, int32 srcLine, const char* scopeName, const T1 & p1, const T1 & p2, const T1 & p3)
218 		{
219 			int32 id;
220 			T* pObject = AllocObject(id);
221 			if (pObject == nullptr)
222 				return invalidStorageId;
223 
224 			//placement ctor
225 			new(pObject) T(srcFile, srcLine, scopeName, p1, p2, p3);
226 			return id;
227 		}
228 
Get(int32 id)229 		T* Get(int32 id)
230 		{
231 			MT_VERIFY(id > invalidStorageId, "Invalid ID", return nullptr );
232 			MT_VERIFY(id <= top.Load(), "Invalid ID", return nullptr );
233 			int32 index = ( id - 1);
234 			T* pObject = IndexToObject(index);
235 			return pObject;
236 		}
237 
238 	};
239 
240 
241 	//
242 	// Weak scope stack
243 	//
244 	//  Weak stack, which means that any data from the stack become invalid after stack entry is popped from stack
	//  Weak stack uses a small amount of memory, but in the case of deferred use of the stack entries you must copy these entries to extend their lifetime.
246 	//
247 	//  Well suited as asset/resource names stack.
248 	//
249 	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
250 	template<typename T, uint32 capacity>
251 	class WeakScopeStack
252 	{
253 		static const int32 ALIGNMENT = 16;
254 		static const int32 ALIGNMENT_MASK = (ALIGNMENT-1);
255 
256 		int32 top;
257 		byte rawMemory_[ capacity * sizeof(T)  + ALIGNMENT];
258 
IndexToObject(int32 index)259 		T* IndexToObject(int32 index)
260 		{
261 			byte* alignedMemory = (byte*)( ( (uintptr_t)&rawMemory_[0] + ALIGNMENT_MASK ) & ~(uintptr_t)ALIGNMENT_MASK );
262 			T* pObjectMemory = (T*)(alignedMemory + index * sizeof(T));
263 			return pObjectMemory;
264 		}
265 
266 
AllocObject()267 		T* AllocObject()
268 		{
269 			int32 index = top;
270 			MT_VERIFY(index < (int32)capacity, "Stack allocator overflow. Can't allocate more memory.", return nullptr);
271 			top++;
272 			T* pObject = IndexToObject(index);
273 			return pObject;
274 		}
275 
276 	public:
277 
WeakScopeStack()278 		WeakScopeStack()
279 		{
280 			static_assert(std::is_base_of<MT::ScopeStackEntry, T>::value, "Type must be derived from MT::ScopeStackEntry");
281 			top = invalidStackId;
282 		}
283 
~WeakScopeStack()284 		~WeakScopeStack()
285 		{
286 			for(int32 i = 0; i < top; i++)
287 			{
288 				T* pObject = IndexToObject(i);
289 				MT_UNUSED(pObject);
290 				pObject->~T();
291 			}
292 			top = 0;
293 		}
294 
295 
Get(int32 id)296 		T* Get(int32 id)
297 		{
298 			MT_VERIFY(id > invalidStackId, "Invalid id", return nullptr);
299 			int32 index = (id - 1);
300 			return IndexToObject(index);
301 		}
302 
Top()303 		int32 Top()
304 		{
305 			int32 id = top;
306 			return id;
307 		}
308 
Pop()309 		void Pop()
310 		{
311 			top--;
312 			int32 index = top;
313 			MT_ASSERT(index >= 0, "Stack already empty. Invalid call.");
314 			T* pObject = IndexToObject(index);
315 			MT_UNUSED(pObject);
316 			pObject->~T();
317 		}
318 
Push()319 		T* Push()
320 		{
321 			T* pObject = AllocObject();
322 			new(pObject) T();
323 			return pObject;
324 		}
325 
326 		template<typename T1>
Push(const T1 & p1)327 		T* Push(const T1 & p1)
328 		{
329 			T* pObject = AllocObject();
330 			new(pObject) T(p1);
331 			return pObject;
332 		}
333 
334 		template<typename T1, typename T2>
Push(const T1 & p1,const T2 & p2)335 		T* Push(const T1 & p1, const T2 & p2)
336 		{
337 			T* pObject = AllocObject();
338 			new(pObject) T(p1, p2);
339 			return pObject;
340 		}
341 
342 		template<typename T1, typename T2, typename T3>
Push(const T1 & p1,const T2 & p2,const T3 & p3)343 		T* Push(const T1 & p1, const T2 & p2, const T3 & p3)
344 		{
345 			T* pObject = AllocObject();
346 			new(pObject) T(p1, p2, p3);
347 			return pObject;
348 		}
349 
350 		template<typename T1, typename T2, typename T3, typename T4>
Push(const T1 & p1,const T2 & p2,const T3 & p3,const T4 & p4)351 		T* Push(const T1 & p1, const T2 & p2, const T3 & p3, const T4 & p4)
352 		{
353 			T* pObject = AllocObject();
354 			new(pObject) T(p1, p2, p3, p4);
355 			return pObject;
356 		}
357 	};
358 
359 
360 	//
361 	// Strong scope stack
362 	//
363 	//  Strong stack, which means that any data from the stack is always valid, until you call Reset();
	//  Strong stack uses a lot of memory, but in the case of deferred use of stack entries you can store a single pointer to the current stack entry.
365 	//
366 	//  Well suited as CPU profiler timings stack.
367 	//
368 	////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
369 	template<typename T, uint32 capacity>
370 	class StrongScopeStack
371 	{
372 		static const int32 ALIGNMENT = 16;
373 		static const int32 ALIGNMENT_MASK = (ALIGNMENT-1);
374 
375 		int32 count;
376 		int32 top;
377 
378 		//max stack deep
379 		std::array<int32, 256> stackId;
380 
381 		//additional bytes for alignment
382 		byte rawMemory_[ capacity * sizeof(T) + ALIGNMENT ];
383 
384 
IndexToObject(int32 index)385 		T* IndexToObject(int32 index)
386 		{
387 			byte* alignedMemory = (byte*)( ( (uintptr_t)&rawMemory_[0] + ALIGNMENT_MASK ) & ~(uintptr_t)ALIGNMENT_MASK );
388 			T* pObjectMemory = (T*)(alignedMemory + index * sizeof(T));
389 			return pObjectMemory;
390 		}
391 
AllocObject()392 		T* AllocObject()
393 		{
394 			int32 stackIndex = top;
395 			MT_VERIFY(stackIndex < (int32)stackId.size(), "Stack is too deep.", return nullptr);
396 			top++;
397 
398 			int32 index = count;
399 			MT_VERIFY(index < (int32)capacity, "Stack allocator overflow. Can't allocate more memory.", return nullptr);
400 			count++;
401 			T* pObject = IndexToObject(index);
402 
403 			stackId[stackIndex] = (index + 1);
404 
405 			return pObject;
406 		}
407 
408 
409 	public:
410 
StrongScopeStack()411 		StrongScopeStack()
412 		{
413 			static_assert(std::is_base_of<MT::ScopeStackEntry, T>::value, "Type must be derived from MT::ScopeStackEntry");
414 			top = invalidStackId;
415 			count = 0;
416 		}
417 
~StrongScopeStack()418 		~StrongScopeStack()
419 		{
420 			Reset();
421 		}
422 
Get(int32 id)423 		T* Get(int32 id)
424 		{
425 			MT_VERIFY(id > invalidStackId, "Invalid id", return nullptr);
426 			int32 index = (id - 1);
427 			return IndexToObject(index);
428 		}
429 
Top()430 		int32 Top()
431 		{
432 			if (top == invalidStackId)
433 			{
434 				return invalidStackId;
435 			}
436 
437 			return stackId[top - 1];
438 		}
439 
Pop()440 		void Pop()
441 		{
442 			top--;
443 			int32 index = top;
444 			MT_ASSERT(index >= 0, "Stack already empty. Invalid call.");
445 
446 			stackId[index] = 0;
447 		}
448 
449 
Push()450 		T* Push()
451 		{
452 			T* pObject = AllocObject();
453 			new(pObject) T();
454 			return pObject;
455 		}
456 
457 		template<typename T1, typename T2>
Push(T1 p1,T2 p2)458 		T* Push(T1 p1, T2 p2)
459 		{
460 			T* pObject = AllocObject();
461 			new(pObject) T(p1, p2);
462 			return pObject;
463 		}
464 
465 		template<typename T1, typename T2, typename T3>
Push(T1 p1,T2 p2,T3 p3)466 		T* Push(T1 p1, T2 p2, T3 p3)
467 		{
468 			T* pObject = AllocObject();
469 			new(pObject) T(p1, p2, p3);
470 			return pObject;
471 		}
472 
473 		template<typename T1, typename T2, typename T3, typename T4>
Push(T1 p1,T2 p2,T3 p3,T4 p4)474 		T* Push(T1 p1, T2 p2, T3 p3, T4 p4)
475 		{
476 			T* pObject = AllocObject();
477 			new(pObject) T(p1, p2, p3, p4);
478 			return pObject;
479 		}
480 
481 
Reset()482 		void Reset()
483 		{
484 			for(int32 i = 0; i < count; i++)
485 			{
486 				T* pObject = IndexToObject(i);
487 				MT_UNUSED(pObject);
488 				pObject->~T();
489 			}
490 
491 #if MT_DEBUG
492 			int32 stackIdCount = (int32)stackId.size();
493 			for(int32 i = 0; i < stackIdCount; i++)
494 			{
495 				stackId[i] = std::numeric_limits<int32>::lowest();
496 			}
497 #endif
498 			count = 0;
499 			top = invalidStackId;
500 		}
501 
502 	};
503 
504 } //MT namespace
505 
506 
// Token pasting helpers; SCOPE_CONCAT(x, line) builds a unique per-source-line identifier.
#define SCOPE_CONCAT_IMPL(x, y) x##y
#define SCOPE_CONCAT(x, y) SCOPE_CONCAT_IMPL(x, y)

// Sentinel states of the per-call-site descriptor id:
//   0  = descriptor not yet allocated
//   -1 = allocation currently in progress on some thread
#define MT_SCOPE_NOT_INITIALIZED (0)
#define MT_SCOPE_NOT_YET_INITIALIZED (-1)

// Opens a thread-safe, one-time descriptor initialization block for this call site.
// A function-local static Atomic32Base holds the descriptor id; the CAS below lets
// exactly one thread enter the MT_SCOPE_NOT_INITIALIZED case (which allocates the
// descriptor — the caller's code between _PRE and _POST does the actual Alloc call).
// Must always be paired with DECLARE_SCOPE_DESCRIPTOR_IMPL_POST.
#define DECLARE_SCOPE_DESCRIPTOR_IMPL_PRE( file, line, name, storagePointer, resultID ) \
	\
	static MT::Atomic32Base<int32> SCOPE_CONCAT(scope_descriptorIndex_, line) = { MT_SCOPE_NOT_INITIALIZED }; \
	static_assert(std::is_pod< MT::Atomic32Base<int32> >::value == true, "AtomicInt32Base type should be POD, to be placed in bss/data section"); \
	\
	int32 SCOPE_CONCAT(scope_descId_, line) = MT_SCOPE_NOT_INITIALIZED; \
	\
	int32 SCOPE_CONCAT(scope_state_, line) = SCOPE_CONCAT(scope_descriptorIndex_, line).CompareAndSwap(MT_SCOPE_NOT_INITIALIZED, MT_SCOPE_NOT_YET_INITIALIZED); \
	switch(SCOPE_CONCAT(scope_state_, line)) \
	{ \
		/* first time here, need to allocate descriptor*/ \
		case MT_SCOPE_NOT_INITIALIZED: \
		{ \
			MT_ASSERT( storagePointer != nullptr, "Scopes storage pointer was not initialized!"); \


// Closes the block opened by DECLARE_SCOPE_DESCRIPTOR_IMPL_PRE:
//  - the allocating thread publishes the new descriptor id via Store,
//  - threads that raced the allocation spin (YieldProcessor) until the id is published,
//  - all later calls take the default case and reuse the already-stored id.
// resultID receives the final descriptor id.
#define DECLARE_SCOPE_DESCRIPTOR_IMPL_POST( file, line, name, storagePointer, resultID ) \
			SCOPE_CONCAT(scope_descriptorIndex_, line).Store( SCOPE_CONCAT(scope_descId_, line) );  \
			break; \
		} \
		\
		/* allocation in progress */ \
		/* wait until the allocation is finished */ \
		case MT_SCOPE_NOT_YET_INITIALIZED: \
		{ \
			for(;;) \
			{ \
				SCOPE_CONCAT(scope_descId_, line) = SCOPE_CONCAT(scope_descriptorIndex_, line).Load(); \
				if (SCOPE_CONCAT(scope_descId_, line) != MT_SCOPE_NOT_YET_INITIALIZED) \
				{ \
					break; \
				} \
				MT::YieldProcessor(); \
			} \
			break; \
		} \
		/* description already allocated */ \
		default: \
		{ \
			SCOPE_CONCAT(scope_descId_, line) = SCOPE_CONCAT(scope_state_, line); \
			break; \
		} \
	} \
	resultID = SCOPE_CONCAT(scope_descId_, line);




// PRE + storage Alloc(file, line, name) + POST combined: lazily allocates the
// descriptor on first execution and yields its id in resultID.
#define DECLARE_SCOPE_DESCRIPTOR_IMPL( file, line, name, storagePointer, resultID ) \
	DECLARE_SCOPE_DESCRIPTOR_IMPL_PRE(file, line, name, storagePointer, resultID); \
		SCOPE_CONCAT(scope_descId_, line) = storagePointer -> Alloc(file, line, name); \
	DECLARE_SCOPE_DESCRIPTOR_IMPL_POST(file, line, name, storagePointer, resultID); \


// Same as DECLARE_SCOPE_DESCRIPTOR_IMPL, forwarding one extra argument to Alloc.
#define DECLARE_SCOPE_DESCRIPTOR_IMPL1( file, line, name, storagePointer, resultID, param1) \
	DECLARE_SCOPE_DESCRIPTOR_IMPL_PRE(file, line, name, storagePointer, resultID); \
		SCOPE_CONCAT(scope_descId_, line) = storagePointer -> Alloc(file, line, name, param1); \
	DECLARE_SCOPE_DESCRIPTOR_IMPL_POST(file, line, name, storagePointer, resultID); \

// Same as DECLARE_SCOPE_DESCRIPTOR_IMPL, forwarding two extra arguments to Alloc.
#define DECLARE_SCOPE_DESCRIPTOR_IMPL2( file, line, name, storagePointer, resultID, param1, param2) \
	DECLARE_SCOPE_DESCRIPTOR_IMPL_PRE(file, line, name, storagePointer, resultID); \
		SCOPE_CONCAT(scope_descId_, line) = storagePointer -> Alloc(file, line, name, param1, param2); \
	DECLARE_SCOPE_DESCRIPTOR_IMPL_POST(file, line, name, storagePointer, resultID); \



// declare scope descriptor for current scope.
#define DECLARE_SCOPE_DESCRIPTOR(name, storagePointer, resultID) DECLARE_SCOPE_DESCRIPTOR_IMPL(__FILE__, __LINE__, name, storagePointer, resultID)

#define DECLARE_SCOPE_DESCRIPTOR1(name, storagePointer, resultID, param1) DECLARE_SCOPE_DESCRIPTOR_IMPL1(__FILE__, __LINE__, name, storagePointer, resultID, param1)

#define DECLARE_SCOPE_DESCRIPTOR2(name, storagePointer, resultID, param1, param2) DECLARE_SCOPE_DESCRIPTOR_IMPL2(__FILE__, __LINE__, name, storagePointer, resultID, param1, param2)

// push new stack entry to stack
// The current Top() becomes the new entry's parent id.
#define SCOPE_STACK_PUSH(scopeDescriptorId, stackPointer) \
	MT_ASSERT(stackPointer != nullptr, "Stack pointer is not initialized for current thread."); \
	int32 SCOPE_CONCAT(scope_stackParentId_, __LINE__) = stackPointer -> Top(); \
	MT_ASSERT(SCOPE_CONCAT(scope_stackParentId_, __LINE__) >= 0, "Invalid parent ID"); \
	stackPointer -> Push(SCOPE_CONCAT(scope_stackParentId_, __LINE__), scopeDescriptorId); \


// push new stack entry to stack
// Same as SCOPE_STACK_PUSH, forwarding one extra constructor argument.
#define SCOPE_STACK_PUSH1(scopeDescriptorId, param1, stackPointer) \
	MT_ASSERT(stackPointer != nullptr, "Stack pointer is not initialized for current thread."); \
	int32 SCOPE_CONCAT(scope_stackParentId_, __LINE__) = stackPointer -> Top(); \
	MT_ASSERT(SCOPE_CONCAT(scope_stackParentId_, __LINE__) >= 0, "Invalid parent ID"); \
	stackPointer -> Push(SCOPE_CONCAT(scope_stackParentId_, __LINE__), scopeDescriptorId, param1); \

// push new stack entry to stack
// Same as SCOPE_STACK_PUSH, forwarding two extra constructor arguments.
#define SCOPE_STACK_PUSH2(scopeDescriptorId, param1, param2, stackPointer) \
	MT_ASSERT(stackPointer != nullptr, "Stack pointer is not initialized for current thread."); \
	int32 SCOPE_CONCAT(scope_stackParentId_, __LINE__) = stackPointer -> Top(); \
	MT_ASSERT(SCOPE_CONCAT(scope_stackParentId_, __LINE__) >= 0, "Invalid parent ID"); \
	stackPointer -> Push(SCOPE_CONCAT(scope_stackParentId_, __LINE__), scopeDescriptorId, param1, param2); \


// pop from the stack
#define SCOPE_STACK_POP(stackPointer) \
	MT_ASSERT(stackPointer != nullptr, "Stack pointer is not initialized for current thread."); \
	stackPointer -> Pop(); \

// get top of the stack
// Expression macro: resolves the top id to the entry pointer.
#define SCOPE_STACK_TOP(stackPointer) \
	stackPointer -> Get( stackPointer -> Top() )


// Expression macro: parent entry of stackEntry, or nullptr for a root entry.
#define SCOPE_STACK_GET_PARENT(stackEntry, stackPointer) \
	(stackEntry -> GetParentId() == MT::invalidStackId) ? nullptr : stackPointer -> Get( stackEntry -> GetParentId() )


#endif // __MT_STACK__
625