Ark Server API (ASA) - Wiki
WindowsPlatformAtomics.h
// Copyright Epic Games, Inc. All Rights Reserved.

#pragma once
#include "CoreTypes.h"
#include "GenericPlatform/GenericPlatformAtomics.h"
#include "Windows/WindowsSystemIncludes.h"
#include <intrin.h>

/**
 * Windows implementation of the Atomics OS functions
 */
struct FWindowsPlatformAtomics
    : public FGenericPlatformAtomics
{
    static_assert(sizeof(int8) == sizeof(char) && alignof(int8) == alignof(char), "int8 must be compatible with char");
    static_assert(sizeof(int16) == sizeof(short) && alignof(int16) == alignof(short), "int16 must be compatible with short");
    static_assert(sizeof(int32) == sizeof(long) && alignof(int32) == alignof(long), "int32 must be compatible with long");
    static_assert(sizeof(int64) == sizeof(long long) && alignof(int64) == alignof(long long), "int64 must be compatible with long long");

    static FORCEINLINE int8 InterlockedIncrement( volatile int8* Value )
    {
        return (int8)::_InterlockedExchangeAdd8((char*)Value, 1) + 1;
    }

    static FORCEINLINE int16 InterlockedIncrement( volatile int16* Value )
    {
        return (int16)::_InterlockedIncrement16((short*)Value);
    }

    static FORCEINLINE int32 InterlockedIncrement( volatile int32* Value )
    {
        return (int32)::_InterlockedIncrement((long*)Value);
    }

    static FORCEINLINE int64 InterlockedIncrement( volatile int64* Value )
    {
#if PLATFORM_64BITS
        return (int64)::_InterlockedIncrement64((long long*)Value);
#else
        // No explicit instruction for 64-bit atomic increment on 32-bit processors; has to be implemented in terms of CMPXCHG8B
        for (;;)
        {
            int64 OldValue = *Value;
            if (_InterlockedCompareExchange64(Value, OldValue + 1, OldValue) == OldValue)
            {
                return OldValue + 1;
            }
        }
#endif
    }

    static FORCEINLINE int8 InterlockedDecrement( volatile int8* Value )
    {
        return (int8)::_InterlockedExchangeAdd8((char*)Value, -1) - 1;
    }

    static FORCEINLINE int16 InterlockedDecrement( volatile int16* Value )
    {
        return (int16)::_InterlockedDecrement16((short*)Value);
    }

    static FORCEINLINE int32 InterlockedDecrement( volatile int32* Value )
    {
        return (int32)::_InterlockedDecrement((long*)Value);
    }

    static FORCEINLINE int64 InterlockedDecrement( volatile int64* Value )
    {
#if PLATFORM_64BITS
        return (int64)::_InterlockedDecrement64((long long*)Value);
#else
        // No explicit instruction for 64-bit atomic decrement on 32-bit processors; has to be implemented in terms of CMPXCHG8B
        for (;;)
        {
            int64 OldValue = *Value;
            if (_InterlockedCompareExchange64(Value, OldValue - 1, OldValue) == OldValue)
            {
                return OldValue - 1;
            }
        }
#endif
    }

    static FORCEINLINE int8 InterlockedAdd( volatile int8* Value, int8 Amount )
    {
        return (int8)::_InterlockedExchangeAdd8((char*)Value, (char)Amount);
    }

    static FORCEINLINE int16 InterlockedAdd( volatile int16* Value, int16 Amount )
    {
        return (int16)::_InterlockedExchangeAdd16((short*)Value, (short)Amount);
    }

    static FORCEINLINE int32 InterlockedAdd( volatile int32* Value, int32 Amount )
    {
        return (int32)::_InterlockedExchangeAdd((long*)Value, (long)Amount);
    }

    static FORCEINLINE int64 InterlockedAdd( volatile int64* Value, int64 Amount )
    {
#if PLATFORM_64BITS
        return (int64)::_InterlockedExchangeAdd64((int64*)Value, (int64)Amount);
#else
        // No explicit instruction for 64-bit atomic add on 32-bit processors; has to be implemented in terms of CMPXCHG8B
        for (;;)
        {
            int64 OldValue = *Value;
            if (_InterlockedCompareExchange64(Value, OldValue + Amount, OldValue) == OldValue)
            {
                return OldValue;
            }
        }
#endif
    }

    static FORCEINLINE int8 InterlockedExchange( volatile int8* Value, int8 Exchange )
    {
        return (int8)::_InterlockedExchange8((char*)Value, (char)Exchange);
    }

    static FORCEINLINE int16 InterlockedExchange( volatile int16* Value, int16 Exchange )
    {
        return (int16)::_InterlockedExchange16((short*)Value, (short)Exchange);
    }

    static FORCEINLINE int32 InterlockedExchange( volatile int32* Value, int32 Exchange )
    {
        return (int32)::_InterlockedExchange((long*)Value, (long)Exchange);
    }

    static FORCEINLINE int64 InterlockedExchange( volatile int64* Value, int64 Exchange )
    {
#if PLATFORM_64BITS
        return (int64)::_InterlockedExchange64((long long*)Value, (long long)Exchange);
#else
        // No explicit instruction for 64-bit atomic exchange on 32-bit processors; has to be implemented in terms of CMPXCHG8B
        for (;;)
        {
            int64 OldValue = *Value;
            if (_InterlockedCompareExchange64(Value, Exchange, OldValue) == OldValue)
            {
                return OldValue;
            }
        }
#endif
    }

    static FORCEINLINE void* InterlockedExchangePtr( void*volatile* Dest, void* Exchange )
    {
#if !(UE_BUILD_SHIPPING || UE_BUILD_TEST)
        if (IsAligned(Dest, alignof(void*)) == false)
        {
            HandleAtomicsFailure(TEXT("InterlockedExchangePointer requires Dest pointer to be aligned to %d bytes"), (int)alignof(void*));
        }
#endif

        return ::_InterlockedExchangePointer(Dest, Exchange);
    }

    static FORCEINLINE int8 InterlockedCompareExchange2( volatile int8* Dest, int8 Exchange, int8 Comparand )
    {
        return (int8)::_InterlockedCompareExchange8((char*)Dest, (char)Exchange, (char)Comparand);
    }

    static FORCEINLINE int16 InterlockedCompareExchange2( volatile int16* Dest, int16 Exchange, int16 Comparand )
    {
        return (int16)::_InterlockedCompareExchange16((short*)Dest, (short)Exchange, (short)Comparand);
    }

    static FORCEINLINE int32 InterlockedCompareExchange2( volatile int32* Dest, int32 Exchange, int32 Comparand )
    {
        return (int32)::_InterlockedCompareExchange((long*)Dest, (long)Exchange, (long)Comparand);
    }

    static FORCEINLINE int64 InterlockedCompareExchange2( volatile int64* Dest, int64 Exchange, int64 Comparand )
    {
#if !(UE_BUILD_SHIPPING || UE_BUILD_TEST)
        if (IsAligned(Dest, alignof(int64)) == false)
        {
            HandleAtomicsFailure(TEXT("InterlockedCompareExchange int64 requires Dest pointer to be aligned to %d bytes"), (int)alignof(int64));
        }
#endif

        return (int64)::_InterlockedCompareExchange64(Dest, Exchange, Comparand);
    }

    static FORCEINLINE int8 InterlockedAnd(volatile int8* Value, const int8 AndValue)
    {
        return (int8)::_InterlockedAnd8((volatile char*)Value, (char)AndValue);
    }

    static FORCEINLINE int16 InterlockedAnd(volatile int16* Value, const int16 AndValue)
    {
        return (int16)::_InterlockedAnd16((volatile short*)Value, (short)AndValue);
    }

    static FORCEINLINE int32 InterlockedAnd(volatile int32* Value, const int32 AndValue)
    {
        return (int32)::_InterlockedAnd((volatile long*)Value, (long)AndValue);
    }

    static FORCEINLINE int64 InterlockedAnd(volatile int64* Value, const int64 AndValue)
    {
#if PLATFORM_64BITS
        return (int64)::_InterlockedAnd64((volatile long long*)Value, (long long)AndValue);
#else
        // No explicit instruction for 64-bit atomic and on 32-bit processors; has to be implemented in terms of CMPXCHG8B
        for (;;)
        {
            const int64 OldValue = *Value;
            if (_InterlockedCompareExchange64(Value, OldValue & AndValue, OldValue) == OldValue)
            {
                return OldValue;
            }
        }
#endif
    }

    static FORCEINLINE int8 InterlockedOr(volatile int8* Value, const int8 OrValue)
    {
        return (int8)::_InterlockedOr8((volatile char*)Value, (char)OrValue);
    }

    static FORCEINLINE int16 InterlockedOr(volatile int16* Value, const int16 OrValue)
    {
        return (int16)::_InterlockedOr16((volatile short*)Value, (short)OrValue);
    }

    static FORCEINLINE int32 InterlockedOr(volatile int32* Value, const int32 OrValue)
    {
        return (int32)::_InterlockedOr((volatile long*)Value, (long)OrValue);
    }

    static FORCEINLINE int64 InterlockedOr(volatile int64* Value, const int64 OrValue)
    {
#if PLATFORM_64BITS
        return (int64)::_InterlockedOr64((volatile long long*)Value, (long long)OrValue);
#else
        // No explicit instruction for 64-bit atomic or on 32-bit processors; has to be implemented in terms of CMPXCHG8B
        for (;;)
        {
            const int64 OldValue = *Value;
            if (_InterlockedCompareExchange64(Value, OldValue | OrValue, OldValue) == OldValue)
            {
                return OldValue;
            }
        }
#endif
    }

    static FORCEINLINE int8 InterlockedXor(volatile int8* Value, const int8 XorValue)
    {
        return (int8)::_InterlockedXor8((volatile char*)Value, (char)XorValue);
    }

    static FORCEINLINE int16 InterlockedXor(volatile int16* Value, const int16 XorValue)
    {
        return (int16)::_InterlockedXor16((volatile short*)Value, (short)XorValue);
    }

    static FORCEINLINE int32 InterlockedXor(volatile int32* Value, const int32 XorValue)
    {
        return (int32)::_InterlockedXor((volatile long*)Value, (int32)XorValue);
    }

    static FORCEINLINE int64 InterlockedXor(volatile int64* Value, const int64 XorValue)
    {
#if PLATFORM_64BITS
        return (int64)::_InterlockedXor64((volatile long long*)Value, (long long)XorValue);
#else
        // No explicit instruction for 64-bit atomic xor on 32-bit processors; has to be implemented in terms of CMPXCHG8B
        for (;;)
        {
            const int64 OldValue = *Value;
            if (_InterlockedCompareExchange64(Value, OldValue ^ XorValue, OldValue) == OldValue)
            {
                return OldValue;
            }
        }
#endif
    }

    static FORCEINLINE int8 AtomicRead(volatile const int8* Src)
    {
        return InterlockedCompareExchange2((int8*)Src, 0, 0);
    }

    static FORCEINLINE int16 AtomicRead(volatile const int16* Src)
    {
        return InterlockedCompareExchange2((int16*)Src, 0, 0);
    }

    static FORCEINLINE int32 AtomicRead(volatile const int32* Src)
    {
        return InterlockedCompareExchange2((int32*)Src, 0, 0);
    }

    static FORCEINLINE int64 AtomicRead(volatile const int64* Src)
    {
        return InterlockedCompareExchange2((int64*)Src, 0, 0);
    }

    static FORCEINLINE int8 AtomicRead_Relaxed(volatile const int8* Src)
    {
        return *Src;
    }

    static FORCEINLINE int16 AtomicRead_Relaxed(volatile const int16* Src)
    {
        return *Src;
    }

    static FORCEINLINE int32 AtomicRead_Relaxed(volatile const int32* Src)
    {
        return *Src;
    }

    static FORCEINLINE int64 AtomicRead_Relaxed(volatile const int64* Src)
    {
#if PLATFORM_64BITS
        return *Src;
#else
        return InterlockedCompareExchange2((volatile int64*)Src, 0, 0);
#endif
    }

    static FORCEINLINE void AtomicStore(volatile int8* Src, int8 Val)
    {
        InterlockedExchange(Src, Val);
    }

    static FORCEINLINE void AtomicStore(volatile int16* Src, int16 Val)
    {
        InterlockedExchange(Src, Val);
    }

    static FORCEINLINE void AtomicStore(volatile int32* Src, int32 Val)
    {
        InterlockedExchange(Src, Val);
    }

    static FORCEINLINE void AtomicStore(volatile int64* Src, int64 Val)
    {
        InterlockedExchange(Src, Val);
    }

    static FORCEINLINE void AtomicStore_Relaxed(volatile int8* Src, int8 Val)
    {
        *Src = Val;
    }

    static FORCEINLINE void AtomicStore_Relaxed(volatile int16* Src, int16 Val)
    {
        *Src = Val;
    }

    static FORCEINLINE void AtomicStore_Relaxed(volatile int32* Src, int32 Val)
    {
        *Src = Val;
    }

    static FORCEINLINE void AtomicStore_Relaxed(volatile int64* Src, int64 Val)
    {
#if PLATFORM_64BITS
        *Src = Val;
#else
        InterlockedExchange(Src, Val);
#endif
    }

    UE_DEPRECATED(4.19, "AtomicRead64 has been deprecated, please use AtomicRead's overload instead")
    static FORCEINLINE int64 AtomicRead64(volatile const int64* Src)
    {
        return AtomicRead(Src);
    }

    /**
     * The function compares the Destination value with the Comparand value:
     * - If the Destination value is equal to the Comparand value, the Exchange value is stored in the address specified by Destination,
     * - Otherwise, the initial value of the Destination parameter is stored in the address specified by Comparand.
     *
     * @return true if Comparand equals the original value of the Destination parameter, or false if Comparand does not equal the original value of the Destination parameter.
     *
     * Early AMD64 processors lacked the CMPXCHG16B instruction.
     * To determine whether the processor supports this operation, call the IsProcessorFeaturePresent function with PF_COMPARE64_EXCHANGE128.
     */
#if PLATFORM_HAS_128BIT_ATOMICS
    static FORCEINLINE bool InterlockedCompareExchange128( volatile FInt128* Dest, const FInt128& Exchange, FInt128* Comparand )
    {
#if !(UE_BUILD_SHIPPING || UE_BUILD_TEST)
        if (IsAligned(Dest, 16) == false)
        {
            HandleAtomicsFailure(TEXT("InterlockedCompareExchange128 requires Dest pointer to be aligned to 16 bytes") );
        }
        if (IsAligned(Comparand, 16) == false)
        {
            HandleAtomicsFailure(TEXT("InterlockedCompareExchange128 requires Comparand pointer to be aligned to 16 bytes") );
        }
#endif

        return ::_InterlockedCompareExchange128((int64 volatile*)Dest, Exchange.High, Exchange.Low, (int64*)Comparand) == 1;
    }
    /**
     * Atomic read of 128 bit value with a memory barrier
     */
    static FORCEINLINE void AtomicRead128(const volatile FInt128* Src, FInt128* OutResult)
    {
        FInt128 Zero;
        Zero.High = 0;
        Zero.Low = 0;
        *OutResult = Zero;
        InterlockedCompareExchange128((volatile FInt128*)Src, Zero, OutResult);
    }

#endif // PLATFORM_HAS_128BIT_ATOMICS

    static FORCEINLINE void* InterlockedCompareExchangePointer( void*volatile* Dest, void* Exchange, void* Comparand )
    {
#if !(UE_BUILD_SHIPPING || UE_BUILD_TEST)
        if (IsAligned(Dest, alignof(void*)) == false)
        {
            HandleAtomicsFailure(TEXT("InterlockedCompareExchangePointer requires Dest pointer to be aligned to %d bytes"), (int)alignof(void*));
        }
#endif

        return ::_InterlockedCompareExchangePointer(Dest, Exchange, Comparand);
    }

    /**
     * @return true, if the processor we are running on can execute compare and exchange 128-bit operation.
     * @see cmpxchg16b, early AMD64 processors don't support this operation.
     */
    static FORCEINLINE bool CanUseCompareExchange128()
    {
        return !!Windows::IsProcessorFeaturePresent(WINDOWS_PF_COMPARE_EXCHANGE128);
    }

protected:
    /**
     * Handles atomics function failure.
     *
     * Since 'check' has not yet been declared here, we need to call an external function to use it.
     *
     * @param InFormat - The format string.
     */
    static void HandleAtomicsFailure( const TCHAR* InFormat, ... );
};
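
Usage sketch (editor's addition, not part of the header): engine code normally reaches these functions through the FPlatformAtomics alias, which on Windows resolves to FWindowsPlatformAtomics; the class is used directly below to keep the example self-contained, and it assumes this header is included. The counter name and the job-tracking scenario are illustrative only.

    // Illustrative only: a shared counter touched by several threads.
    volatile int32 GPendingJobs = 0;

    void OnJobQueued()
    {
        // Atomically bumps the counter; the incremented value is returned.
        FWindowsPlatformAtomics::InterlockedIncrement(&GPendingJobs);
    }

    bool OnJobFinished()
    {
        // Atomically decrements and reports whether this was the last pending job.
        return FWindowsPlatformAtomics::InterlockedDecrement(&GPendingJobs) == 0;
    }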
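The "implemented in terms of CMPXCHG8B" fallbacks above all use the same compare-exchange retry loop, and that pattern extends to operations the header does not provide. A minimal sketch, assuming the InterlockedCompareExchange2 overloads shown above; AtomicMax is a hypothetical helper, not part of the API:

    // Hypothetical helper: raise *Value to at least NewValue, atomically.
    static int32 AtomicMax(volatile int32* Value, int32 NewValue)
    {
        for (;;)
        {
            const int32 OldValue = *Value;
            if (OldValue >= NewValue)
            {
                return OldValue; // Already large enough; nothing to store.
            }
            if (FWindowsPlatformAtomics::InterlockedCompareExchange2(Value, NewValue, OldValue) == OldValue)
            {
                return NewValue; // Our value was published.
            }
            // Another thread changed *Value between the read and the CAS; retry.
        }
    }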
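Because early AMD64 processors lack CMPXCHG16B, callers of the 128-bit operations are expected to check CanUseCompareExchange128() first. A hedged sketch under that assumption, using the FInt128 type from GenericPlatformAtomics.h and the 128-bit signatures shown above; TryPublish128 is a hypothetical helper:

    #if PLATFORM_HAS_128BIT_ATOMICS
    // Hypothetical helper: store Desired into *Slot only if *Slot still equals Expected.
    // Slot and &Expected must be 16-byte aligned, as the header's runtime checks require.
    bool TryPublish128(volatile FInt128* Slot, const FInt128& Desired, FInt128& Expected)
    {
        if (!FWindowsPlatformAtomics::CanUseCompareExchange128())
        {
            return false; // CPU lacks CMPXCHG16B; the caller must fall back to a lock.
        }
        // On failure, the current contents of *Slot are written back into Expected.
        return FWindowsPlatformAtomics::InterlockedCompareExchange128(Slot, Desired, &Expected);
    }
    #endif // PLATFORM_HAS_128BIT_ATOMICS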