#if defined __cplusplus_cli && !PLATFORM_HOLOLENS

#include "Math/UnrealMathFPU.h"

#pragma warning( push )
#pragma warning( disable : 4668 )
#include <DirectXMath.h>
#include <DirectXPackedVector.h>
#pragma warning( pop )

/** Vector register type, mapped directly to DirectXMath's XMVECTOR. */
typedef DirectX::XMVECTOR VectorRegister;

/** 4x int32 vector register type. */
typedef __m128i VectorRegister4Int;

/** Helper for declaring a vector register from four component values. */
#define DECLARE_VECTOR_REGISTER(X, Y, Z, W) { X, Y, Z, W }
/**
 * Returns a bitwise equivalent vector based on four uint32 components.
 */
FORCEINLINE VectorRegister MakeVectorRegister( uint32 X, uint32 Y, uint32 Z, uint32 W )
{
	using namespace DirectX;
	return DirectX::XMVectorSetInt( X, Y, Z, W );
}
/**
 * Returns a vector based on four float components.
 */
FORCEINLINE VectorRegister MakeVectorRegister( float X, float Y, float Z, float W )
{
	return DirectX::XMVectorSet( X, Y, Z, W );
}
/**
 * Returns an integer vector based on four int32 components.
 */
FORCEINLINE VectorRegister4Int MakeVectorRegisterInt(int32 X, int32 Y, int32 Z, int32 W)
{
	return _mm_castps_si128(DirectX::XMVectorSetInt(X, Y, Z, W));
}
FORCEINLINE constexpr VectorRegister4Int MakeVectorRegisterIntConstant(int32 X, int32 Y, int32 Z, int32 W)
{
	// __m128i has no constexpr constructor taking int32s, so build the register byte by byte (little-endian).
	return { static_cast<char>(X >> 0), static_cast<char>(X >> 8), static_cast<char>(X >> 16), static_cast<char>(X >> 24),
			 static_cast<char>(Y >> 0), static_cast<char>(Y >> 8), static_cast<char>(Y >> 16), static_cast<char>(Y >> 24),
			 static_cast<char>(Z >> 0), static_cast<char>(Z >> 8), static_cast<char>(Z >> 16), static_cast<char>(Z >> 24),
			 static_cast<char>(W >> 0), static_cast<char>(W >> 8), static_cast<char>(W >> 16), static_cast<char>(W >> 24) };
}
FORCEINLINE constexpr VectorRegister4Float MakeVectorRegisterFloatConstant(float X, float Y, float Z, float W)
{
	return VectorRegister4Float { X, Y, Z, W };
}
FORCEINLINE constexpr VectorRegister2Double MakeVectorRegister2DoubleConstant(double X, double Y)
{
	return VectorRegister2Double { X, Y };
}
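// Illustrative example (not part of this header): the Make* helpers above can build both
// float-valued registers and raw bit-pattern registers. Values shown are arbitrary.
//
//     const VectorRegister HalfVec  = MakeVectorRegister(0.5f, 0.5f, 0.5f, 0.5f);
//     const VectorRegister SignMask = MakeVectorRegister(0x80000000u, 0x80000000u, 0x80000000u, 0x80000000u);
//     const VectorRegister4Int Ones = MakeVectorRegisterIntConstant(1, 1, 1, 1);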
#include "Math/UnrealMathVectorConstants.h"
/**
 * Returns a vector with all components set to zero.
 */
#define VectorZero() DirectX::XMVectorZero()

/**
 * Returns a vector with all components set to one.
 */
#define VectorOne() DirectX::g_XMOne.v

/**
 * Loads four floats from unaligned memory.
 */
#define VectorLoad( Ptr ) DirectX::XMLoadFloat4( (const DirectX::XMFLOAT4*)(Ptr) )
/**
 * Loads three floats from unaligned memory and sets W = 0.
 */
#define VectorLoadFloat3( Ptr ) MakeVectorRegister( ((const float*)(Ptr))[0], ((const float*)(Ptr))[1], ((const float*)(Ptr))[2], 0.0f )

/**
 * Loads three floats from unaligned memory and sets W = 0.
 */
#define VectorLoadFloat3_W0( Ptr ) MakeVectorRegister( ((const float*)(Ptr))[0], ((const float*)(Ptr))[1], ((const float*)(Ptr))[2], 0.0f )

/**
 * Loads three floats from unaligned memory and sets W = 1.
 */
#define VectorLoadFloat3_W1( Ptr ) MakeVectorRegister( ((const float*)(Ptr))[0], ((const float*)(Ptr))[1], ((const float*)(Ptr))[2], 1.0f )

/**
 * Loads four floats from 16-byte aligned memory.
 */
#define VectorLoadAligned( Ptr ) DirectX::XMLoadFloat4A( (const DirectX::XMFLOAT4A*)(Ptr) )

/**
 * Loads one float and replicates it to all four components.
 */
#define VectorLoadFloat1( Ptr ) DirectX::XMVectorReplicatePtr( (const float*)(Ptr) )

/**
 * Loads two floats and replicates them: (Ptr[0], Ptr[1], Ptr[0], Ptr[1]).
 */
#define VectorLoadFloat2( Ptr ) MakeVectorRegister( ((const float*)(Ptr))[0], ((const float*)(Ptr))[1], ((const float*)(Ptr))[0], ((const float*)(Ptr))[1] )
/**
 * Loads two floats from each of two pointers into one register: (Ptr1[0], Ptr1[1], Ptr2[0], Ptr2[1]).
 */
FORCEINLINE VectorRegister VectorLoadTwoPairsFloat( const float* Ptr1, const float* Ptr2 )
{
	__m128 Ret = _mm_castpd_ps(_mm_load_sd((double const*)(Ptr1)));
	Ret = _mm_loadh_pi(Ret, (__m64 const*)(Ptr2));
	return Ret;
}
/**
 * Creates a vector out of three floats and sets W = 0.
 */
#define VectorSetFloat3( X, Y, Z ) MakeVectorRegister( X, Y, Z, 0.0f )

/**
 * Replicates one float to all four components.
 */
#define VectorSetFloat1( X ) MakeVectorRegister( X, X, X, X )

/**
 * Creates a vector out of four floats.
 */
#define VectorSet( X, Y, Z, W ) MakeVectorRegister( X, Y, Z, W )
/**
 * Stores a vector to 16-byte aligned memory.
 */
#define VectorStoreAligned( Vec, Ptr ) DirectX::XMStoreFloat4A( (DirectX::XMFLOAT4A*)(Ptr), Vec )

/**
 * Stores a vector to 16-byte aligned memory using a non-temporal (streaming) store.
 */
#define VectorStoreAlignedStreamed( Vec, Ptr ) XM_STREAM_PS( (float*)(Ptr), Vec )

/**
 * Stores a vector to unaligned memory.
 */
#define VectorStore( Vec, Ptr ) DirectX::XMStoreFloat4( (DirectX::XMFLOAT4*)(Ptr), Vec )

/**
 * Stores the XYZ components of a vector to unaligned memory.
 */
#define VectorStoreFloat3( Vec, Ptr ) DirectX::XMStoreFloat3( (DirectX::XMFLOAT3*)(Ptr), Vec )

/**
 * Stores the X component of a vector to unaligned memory.
 */
#define VectorStoreFloat1( Vec, Ptr ) DirectX::XMStoreFloat( (float*)(Ptr), Vec )
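// Illustrative example (not part of this header): a typical load/modify/store round trip
// with the helpers above. The buffer names are placeholders.
//
//     alignas(16) float Src[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
//     alignas(16) float Dst[4];
//     VectorRegister V = VectorLoadAligned(Src);
//     V = VectorMultiply(V, VectorSetFloat1(2.0f));   // scale every component by 2
//     VectorStoreAligned(V, Dst);                     // Dst = {2, 4, 6, 8}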
/**
 * Returns a single component of a vector by index (0-3).
 */
FORCEINLINE float VectorGetComponent( VectorRegister Vec, uint32 ComponentIndex )
{
	switch (ComponentIndex)
	{
	case 0:
		return DirectX::XMVectorGetX(Vec);
	case 1:
		return DirectX::XMVectorGetY(Vec);
	case 2:
		return DirectX::XMVectorGetZ(Vec);
	case 3:
		return DirectX::XMVectorGetW(Vec);
	}
	return 0.0f;	// Not reachable for valid indices.
}
/**
 * Replicates one element of a vector into all four components.
 */
#define VectorReplicate( Vec, ElementIndex ) DirectX::XMVectorSwizzle<ElementIndex,ElementIndex,ElementIndex,ElementIndex>(Vec)

/**
 * Returns the absolute value of each component.
 */
#define VectorAbs( Vec ) DirectX::XMVectorAbs( Vec )

/**
 * Returns the negated value of each component.
 */
#define VectorNegate( Vec ) DirectX::XMVectorNegate( Vec )
/**
 * Component-wise addition.
 */
#define VectorAdd( Vec1, Vec2 ) DirectX::XMVectorAdd( Vec1, Vec2 )

/**
 * Component-wise subtraction.
 */
#define VectorSubtract( Vec1, Vec2 ) DirectX::XMVectorSubtract( Vec1, Vec2 )

/**
 * Component-wise multiplication.
 */
#define VectorMultiply( Vec1, Vec2 ) DirectX::XMVectorMultiply( Vec1, Vec2 )

/**
 * Component-wise division.
 */
#define VectorDivide( Vec1, Vec2 ) DirectX::XMVectorDivide( Vec1, Vec2 )

/**
 * Component-wise multiply-add: Vec1 * Vec2 + Vec3.
 */
#define VectorMultiplyAdd( Vec1, Vec2, Vec3 ) DirectX::XMVectorMultiplyAdd( Vec1, Vec2, Vec3 )

/**
 * Component-wise negated multiply-add: Vec3 - Vec1 * Vec2.
 */
#define VectorNegateMultiplyAdd(Vec1, Vec2, Vec3) DirectX::XMVectorNegativeMultiplySubtract( Vec1, Vec2, Vec3 )
/**
 * 3D dot product, replicated into all four components.
 */
#define VectorDot3( Vec1, Vec2 ) DirectX::XMVector3Dot( Vec1, Vec2 )

/**
 * 4D dot product, replicated into all four components.
 */
#define VectorDot4( Vec1, Vec2 ) DirectX::XMVector4Dot( Vec1, Vec2 )
/**
 * Component-wise equality compare, returning an all-bits mask per passing component.
 */
#define VectorCompareEQ( Vec1, Vec2 ) DirectX::XMVectorEqual( Vec1, Vec2 )

/**
 * Component-wise inequality compare.
 */
#define VectorCompareNE( Vec1, Vec2 ) DirectX::XMVectorNotEqual( Vec1, Vec2 )

/**
 * Component-wise greater-than compare.
 */
#define VectorCompareGT( Vec1, Vec2 ) DirectX::XMVectorGreater( Vec1, Vec2 )

/**
 * Component-wise greater-or-equal compare.
 */
#define VectorCompareGE( Vec1, Vec2 ) DirectX::XMVectorGreaterOrEqual( Vec1, Vec2 )

/**
 * Component-wise less-than compare.
 */
#define VectorCompareLT( Vec1, Vec2 ) _mm_cmplt_ps( Vec1, Vec2 )

/**
 * Component-wise less-or-equal compare.
 */
#define VectorCompareLE( Vec1, Vec2 ) _mm_cmple_ps( Vec1, Vec2 )
/**
 * Selects Vec1 where the mask bits are set and Vec2 where they are clear.
 * Note that DirectXMath's XMVectorSelect uses the opposite argument order.
 */
#define VectorSelect( Mask, Vec1, Vec2 ) DirectX::XMVectorSelect( Vec2, Vec1, Mask )
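// Illustrative example (not part of this header): branchless per-component maximum using a
// compare mask with VectorSelect. Variable names are placeholders.
//
//     VectorRegister Mask  = VectorCompareGT(A, B);      // all-ones where A > B
//     VectorRegister MaxAB = VectorSelect(Mask, A, B);    // picks A where the mask is set, else B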
/**
 * Bitwise OR of two vectors.
 */
#define VectorBitwiseOr( Vec1, Vec2 ) DirectX::XMVectorOrInt( Vec1, Vec2 )

/**
 * Bitwise AND of two vectors.
 */
#define VectorBitwiseAnd( Vec1, Vec2 ) DirectX::XMVectorAndInt( Vec1, Vec2 )

/**
 * Bitwise XOR of two vectors.
 */
#define VectorBitwiseXor( Vec1, Vec2 ) DirectX::XMVectorXorInt( Vec1, Vec2 )

/**
 * Returns a 4-bit mask built from the sign bit of each component.
 */
#define VectorMaskBits( VecMask ) _mm_movemask_ps( VecMask )
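// Illustrative example (not part of this header): collapsing a per-component compare mask
// into scalar control flow with VectorMaskBits.
//
//     const int Bits = VectorMaskBits(VectorCompareGE(V, VectorZero()));
//     const bool bAllNonNegative = (Bits == 0xF);   // one bit per component, X in bit 0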
/**
 * 3D cross product of two vectors.
 */
#define VectorCross( Vec1, Vec2 ) DirectX::XMVector3Cross( Vec1, Vec2 )

/**
 * Component-wise power: Vec1 ^ Vec2.
 */
#define VectorPow( Vec1, Vec2 ) DirectX::XMVectorPow( Vec1, Vec2 )
/**
 * Component-wise estimated reciprocal square root.
 */
#define VectorReciprocalSqrt( Vec ) DirectX::XMVectorReciprocalSqrtEst( Vec )

/**
 * Component-wise estimated reciprocal.
 */
#define VectorReciprocal( Vec ) DirectX::XMVectorReciprocalEst( Vec )

/**
 * Estimated reciprocal of the 4D length, replicated into all components.
 */
#define VectorReciprocalLen( Vec ) DirectX::XMVector4ReciprocalLengthEst( Vec )

/**
 * Component-wise reciprocal square root (full precision).
 */
#define VectorReciprocalSqrtAccurate( Vec ) DirectX::XMVectorReciprocalSqrt( Vec )

/**
 * Component-wise reciprocal (full precision).
 */
#define VectorReciprocalAccurate( Vec ) DirectX::XMVectorReciprocal( Vec )

/**
 * Normalizes a 4D vector using an estimated reciprocal length.
 */
#define VectorNormalize( Vec ) DirectX::XMVector4NormalizeEst( Vec )
/**
 * Returns a copy of the vector with W set to 0.
 */
#define VectorSet_W0( Vec ) DirectX::XMVectorAndInt( Vec, DirectX::g_XMMask3 )

/**
 * Returns a copy of the vector with W set to 1.
 */
#define VectorSet_W1( Vec ) DirectX::XMVectorPermute<0,1,2,7>( Vec, VectorOne() )
/**
 * Multiplies two 4x4 matrices: Result = Matrix1 * Matrix2.
 */
FORCEINLINE void VectorMatrixMultiply( FMatrix* Result, const FMatrix* Matrix1, const FMatrix* Matrix2 )
{
	using namespace DirectX;
	XMMATRIX XMatrix1 = XMLoadFloat4x4A( (const XMFLOAT4X4A*)(Matrix1) );
	XMMATRIX XMatrix2 = XMLoadFloat4x4A( (const XMFLOAT4X4A*)(Matrix2) );
	XMMATRIX XMatrixR = XMMatrixMultiply( XMatrix1, XMatrix2 );
	XMStoreFloat4x4A( (XMFLOAT4X4A*)(Result), XMatrixR );
}
/**
 * Computes the inverse of a 4x4 matrix.
 */
FORCEINLINE void VectorMatrixInverse( FMatrix* DstMatrix, const FMatrix* SrcMatrix )
{
	using namespace DirectX;
	XMMATRIX XMSrcMatrix = XMLoadFloat4x4A( (const XMFLOAT4X4A*)(SrcMatrix) );
	XMMATRIX XMDstMatrix = XMMatrixInverse( nullptr, XMSrcMatrix );
	XMStoreFloat4x4A( (XMFLOAT4X4A*)(DstMatrix), XMDstMatrix );
}
/**
 * Transforms a vector (X, Y, Z, W) by a 4x4 matrix.
 */
FORCEINLINE VectorRegister VectorTransformVector( const VectorRegister& VecP, const FMatrix* MatrixM )
{
	using namespace DirectX;
	XMMATRIX M1 = XMLoadFloat4x4A( (const XMFLOAT4X4A*)(MatrixM) );
	return XMVector4Transform( VecP, M1 );
}
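// Illustrative example (not part of this header): transforming a point by the product of two
// matrices. The FMatrix values are placeholders and are assumed to be 16-byte aligned, as
// required by the aligned loads above.
//
//     FMatrix Combined;
//     VectorMatrixMultiply(&Combined, &LocalToWorld, &WorldToClip);
//     VectorRegister Point = VectorSet(10.0f, 0.0f, 5.0f, 1.0f);
//     VectorRegister Clip  = VectorTransformVector(Point, &Combined);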
/**
 * Component-wise minimum.
 */
#define VectorMin( Vec1, Vec2 ) DirectX::XMVectorMin( Vec1, Vec2 )

/**
 * Component-wise maximum.
 */
#define VectorMax( Vec1, Vec2 ) DirectX::XMVectorMax( Vec1, Vec2 )

/**
 * Swizzles a vector using compile-time component indices (0-3).
 */
#define VectorSwizzle( Vec, X, Y, Z, W ) DirectX::XMVectorSwizzle<X,Y,Z,W>( Vec )
/**
 * Builds a vector from two others: X and Y index into Vec1, Z and W index into Vec2 (each 0-3).
 */
#define VectorShuffle( Vec1, Vec2, X, Y, Z, W ) DirectX::XMVectorPermute<X,Y,Z+4,W+4>( Vec1, Vec2 )
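// Illustrative example (not part of this header): VectorSwizzle reorders one register, while
// VectorShuffle takes X/Y from the first register and Z/W from the second.
//
//     VectorRegister A  = MakeVectorRegister(0.0f, 1.0f, 2.0f, 3.0f);
//     VectorRegister B  = MakeVectorRegister(4.0f, 5.0f, 6.0f, 7.0f);
//     VectorRegister R1 = VectorSwizzle(A, 3, 2, 1, 0);       // (3, 2, 1, 0)
//     VectorRegister R2 = VectorShuffle(A, B, 0, 1, 0, 1);    // (0, 1, 4, 5)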
/**
 * Creates a vector by combining the high words of two vectors: (Vec1.Z, Vec1.W, Vec2.Z, Vec2.W).
 */
FORCEINLINE VectorRegister VectorCombineHigh( const VectorRegister& Vec1, const VectorRegister& Vec2 )
{
	return VectorShuffle(Vec1, Vec2, 2, 3, 2, 3);
}

/**
 * Creates a vector by combining the low words of two vectors: (Vec1.X, Vec1.Y, Vec2.X, Vec2.Y).
 */
FORCEINLINE VectorRegister VectorCombineLow( const VectorRegister& Vec1, const VectorRegister& Vec2 )
{
	return VectorShuffle(Vec1, Vec2, 0, 1, 0, 1);
}

/**
 * Deinterleaves the components of two vectors: OutEvens = (Lo.X, Lo.Z, Hi.X, Hi.Z), OutOdds = (Lo.Y, Lo.W, Hi.Y, Hi.W).
 */
FORCEINLINE void VectorDeinterleave( VectorRegister& OutEvens, VectorRegister& OutOdds, const VectorRegister& Lo, const VectorRegister& Hi )
{
	OutEvens = _mm_shuffle_ps(Lo, Hi, _MM_SHUFFLE(2, 0, 2, 0));
	OutOdds  = _mm_shuffle_ps(Lo, Hi, _MM_SHUFFLE(3, 1, 3, 1));
}
/** Component-wise comparison masks (all-bits set per passing component). */
#define VectorMask_LT( Vec1, Vec2 ) _mm_cmplt_ps( Vec1, Vec2 )
#define VectorMask_LE( Vec1, Vec2 ) _mm_cmple_ps( Vec1, Vec2 )
#define VectorMask_GT( Vec1, Vec2 ) DirectX::XMVectorGreater( Vec1, Vec2 )
#define VectorMask_GE( Vec1, Vec2 ) DirectX::XMVectorGreaterOrEqual( Vec1, Vec2 )
#define VectorMask_EQ( Vec1, Vec2 ) DirectX::XMVectorEqual( Vec1, Vec2 )
#define VectorMask_NE( Vec1, Vec2 ) DirectX::XMVectorNotEqual( Vec1, Vec2 )
/**
 * Merges the XYZ components of one vector with the W component of another.
 */
FORCEINLINE VectorRegister VectorMergeVecXYZ_VecW( const VectorRegister& VecXYZ, const VectorRegister& VecW )
{
	using namespace DirectX;
	return DirectX::XMVectorSelect( VecXYZ, VecW, g_XMMaskW );
}
/**
 * Loads four uint8s and converts them to four floats.
 */
#define VectorLoadByte4( Ptr ) DirectX::PackedVector::XMLoadUByte4( (const DirectX::PackedVector::XMUBYTE4*)(Ptr) )

/**
 * Loads four int8s and converts them to four floats.
 */
#define VectorLoadSignedByte4( Ptr ) DirectX::PackedVector::XMLoadByte4( (const DirectX::PackedVector::XMBYTE4*)(Ptr) )
/**
 * Loads four uint8s in reverse order and converts them to four floats.
 */
FORCEINLINE VectorRegister VectorLoadByte4Reverse( const uint8* Ptr )
{
	VectorRegister Temp = VectorLoadByte4(Ptr);
	return VectorSwizzle(Temp, 3, 2, 1, 0);
}
/**
 * Converts the four floats of a vector to four uint8s and stores them.
 */
#define VectorStoreByte4( Vec, Ptr ) DirectX::PackedVector::XMStoreUByte4( (DirectX::PackedVector::XMUBYTE4*)(Ptr), Vec )

/**
 * Converts the four floats of a vector to four int8s and stores them.
 */
#define VectorStoreSignedByte4( Vec, Ptr ) DirectX::PackedVector::XMStoreByte4( (DirectX::PackedVector::XMBYTE4*)(Ptr), Vec )
/**
 * Loads a packed RGB10A2 value and converts it to four normalized floats.
 */
#define VectorLoadURGB10A2N( Ptr ) DirectX::PackedVector::XMLoadUDecN4( (const DirectX::PackedVector::XMUDECN4*)(Ptr) )

/**
 * Converts the four floats of a vector to a packed RGB10A2 value and stores it.
 */
#define VectorStoreURGB10A2N( Vec, Ptr ) DirectX::PackedVector::XMStoreUDecN4( (DirectX::PackedVector::XMUDECN4*)(Ptr), Vec )

/**
 * Loads four uint16s and converts them to four normalized floats.
 */
#define VectorLoadURGBA16N( Ptr ) DirectX::PackedVector::XMLoadUShortN4( (const DirectX::PackedVector::XMUSHORTN4*)(Ptr) )

/**
 * Loads four int16s and converts them to four normalized floats.
 */
#define VectorLoadSRGBA16N( Ptr ) DirectX::PackedVector::XMLoadShortN4( (const DirectX::PackedVector::XMSHORTN4*)(Ptr) )

/**
 * Converts the four floats of a vector to four normalized uint16s and stores them.
 */
#define VectorStoreURGBA16N( Vec, Ptr ) DirectX::PackedVector::XMStoreUShortN4( (DirectX::PackedVector::XMUSHORTN4*)(Ptr), Vec )
/**
 * Returns non-zero if any component of Vec1 is greater than the corresponding component of Vec2.
 */
FORCEINLINE uint32 VectorAnyGreaterThan( const VectorRegister& Vec1, const VectorRegister& Vec2 )
{
	using namespace DirectX;
	uint32_t comparisonValue = XMVector4GreaterR( Vec1, Vec2 );
	return (uint32)XMComparisonAnyTrue( comparisonValue );
}
/**
 * Resets the floating point registers so that regular FPU operations can be used again (no-op on SSE).
 */
#define VectorResetFloatRegisters()

/**
 * Returns the control register.
 */
#define VectorGetControlRegister() _mm_getcsr()

/**
 * Sets the control register.
 */
#define VectorSetControlRegister(ControlStatus) _mm_setcsr( ControlStatus )

/**
 * Control status bit to round all floating point math results towards zero.
 */
#define VECTOR_ROUND_TOWARD_ZERO _MM_ROUND_TOWARD_ZERO
/**
 * Multiplies two quaternions: Result = Quat1 * Quat2.
 */
FORCEINLINE VectorRegister VectorQuaternionMultiply2( const VectorRegister& Quat1, const VectorRegister& Quat2 )
{
	// DirectXMath's XMQuaternionMultiply concatenates in the opposite order, so swap the arguments.
	return DirectX::XMQuaternionMultiply( Quat2, Quat1 );
}
/**
 * Multiplies two quaternions: Result = Quat1 * Quat2.
 */
FORCEINLINE void VectorQuaternionMultiply( FQuat* Result, const FQuat* Quat1, const FQuat* Quat2 )
{
	VectorRegister XMQuat1 = VectorLoadAligned(Quat1);
	VectorRegister XMQuat2 = VectorLoadAligned(Quat2);
	VectorRegister XMResult = VectorQuaternionMultiply2(XMQuat1, XMQuat2);
	VectorStoreAligned(XMResult, Result);
}

/**
 * Multiplies two quaternion registers: *VResult = *VQuat1 * *VQuat2.
 */
FORCEINLINE void VectorQuaternionMultiply( VectorRegister* VResult, const VectorRegister* VQuat1, const VectorRegister* VQuat2 )
{
	*VResult = VectorQuaternionMultiply2(*VQuat1, *VQuat2);
}
/**
 * Rotates a vector by a unit quaternion.
 */
FORCEINLINE void VectorQuaternionVector3Rotate( FVector* Result, const FVector* Vec, const FQuat* Quat )
{
	VectorRegister XMVec = VectorLoadFloat3_W0(Vec);
	VectorRegister XMQuat = VectorLoadAligned(Quat);
	VectorRegister XMResult = DirectX::XMVector3Rotate(XMVec, XMQuat);
	VectorStoreFloat3(XMResult, Result);
}

/**
 * Rotates a vector by the inverse of a unit quaternion.
 */
FORCEINLINE void VectorQuaternionVector3InverseRotate( FVector* Result, const FVector* Vec, const FQuat* Quat )
{
	VectorRegister XMVec = VectorLoadFloat3_W0(Vec);
	VectorRegister XMQuat = VectorLoadAligned(Quat);
	VectorRegister XMResult = DirectX::XMVector3InverseRotate(XMVec, XMQuat);
	VectorStoreFloat3(XMResult, Result);
}
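// Illustrative example (not part of this header): rotating a direction vector by a unit
// quaternion and back. The values are placeholders.
//
//     FQuat Rotation(FVector::UpVector, PI * 0.5f);   // 90 degrees around Z
//     FVector Dir(1.0f, 0.0f, 0.0f), Rotated, Restored;
//     VectorQuaternionVector3Rotate(&Rotated, &Dir, &Rotation);
//     VectorQuaternionVector3InverseRotate(&Restored, &Rotated, &Rotation);   // ~= Dir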
/**
 * Computes the sine and cosine of each component of a vector.
 *
 * @param VSinAngles	Pointer to where the per-component sine results should be stored
 * @param VCosAngles	Pointer to where the per-component cosine results should be stored
 * @param VAngles		Pointer to the input angles (radians)
 */
FORCEINLINE void VectorSinCos( VectorRegister* RESTRICT VSinAngles, VectorRegister* RESTRICT VCosAngles, const VectorRegister* RESTRICT VAngles )
{
	using namespace DirectX;
	// Force the value within the bounds of pi
	XMVECTOR x = XMVectorModAngles(*VAngles);

	// Map in [-pi/2, pi/2] with sin(y) = sin(x), cos(y) = sign * cos(x).
	XMVECTOR sign = _mm_and_ps(x, g_XMNegativeZero);
	__m128 c = _mm_or_ps(g_XMPi, sign);				// pi when x >= 0, -pi when x < 0
	__m128 absx = _mm_andnot_ps(sign, x);			// |x|
	__m128 rflx = _mm_sub_ps(c, x);
	__m128 comp = _mm_cmple_ps(absx, g_XMHalfPi);
	__m128 select0 = _mm_and_ps(comp, x);
	__m128 select1 = _mm_andnot_ps(comp, rflx);
	x = _mm_or_ps(select0, select1);
	select0 = _mm_and_ps(comp, g_XMOne);
	select1 = _mm_andnot_ps(comp, g_XMNegativeOne);
	sign = _mm_or_ps(select0, select1);

	__m128 x2 = _mm_mul_ps(x, x);

	// Compute polynomial approximation of sine
	const XMVECTOR SC1 = g_XMSinCoefficients1;
	XMVECTOR vConstants = XM_PERMUTE_PS( SC1, _MM_SHUFFLE(0, 0, 0, 0) );
	__m128 Result = _mm_mul_ps(vConstants, x2);

	const XMVECTOR SC0 = g_XMSinCoefficients0;
	vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(3, 3, 3, 3) );
	Result = _mm_add_ps(Result, vConstants);
	Result = _mm_mul_ps(Result, x2);

	vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(2, 2, 2, 2) );
	Result = _mm_add_ps(Result, vConstants);
	Result = _mm_mul_ps(Result, x2);

	vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(1, 1, 1, 1) );
	Result = _mm_add_ps(Result, vConstants);
	Result = _mm_mul_ps(Result, x2);

	vConstants = XM_PERMUTE_PS( SC0, _MM_SHUFFLE(0, 0, 0, 0) );
	Result = _mm_add_ps(Result, vConstants);
	Result = _mm_mul_ps(Result, x2);
	Result = _mm_add_ps(Result, g_XMOne);
	Result = _mm_mul_ps(Result, x);
	*VSinAngles = Result;

	// Compute polynomial approximation of cosine
	const XMVECTOR CC1 = g_XMCosCoefficients1;
	vConstants = XM_PERMUTE_PS( CC1, _MM_SHUFFLE(0, 0, 0, 0) );
	Result = _mm_mul_ps(vConstants, x2);

	const XMVECTOR CC0 = g_XMCosCoefficients0;
	vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(3, 3, 3, 3) );
	Result = _mm_add_ps(Result, vConstants);
	Result = _mm_mul_ps(Result, x2);

	vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(2, 2, 2, 2) );
	Result = _mm_add_ps(Result, vConstants);
	Result = _mm_mul_ps(Result, x2);

	vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(1, 1, 1, 1) );
	Result = _mm_add_ps(Result, vConstants);
	Result = _mm_mul_ps(Result, x2);

	vConstants = XM_PERMUTE_PS( CC0, _MM_SHUFFLE(0, 0, 0, 0) );
	Result = _mm_add_ps(Result, vConstants);
	Result = _mm_mul_ps(Result, x2);
	Result = _mm_add_ps(Result, g_XMOne);
	Result = _mm_mul_ps(Result, sign);
	*VCosAngles = Result;
}
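// Illustrative example (not part of this header): evaluating sine and cosine of four angles
// at once. The angle values are arbitrary.
//
//     const VectorRegister Angles = MakeVectorRegister(0.0f, 0.5f, 1.0f, 2.0f);   // radians
//     VectorRegister S, C;
//     VectorSinCos(&S, &C, &Angles);    // S[i] = sin(Angles[i]), C[i] = cos(Angles[i])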
// Transcendental and rounding wrappers around DirectXMath.
FORCEINLINE VectorRegister VectorExp(const VectorRegister& X)		{ return DirectX::XMVectorExp(X); }
FORCEINLINE VectorRegister VectorExp2(const VectorRegister& X)		{ return DirectX::XMVectorExp2(X); }
FORCEINLINE VectorRegister VectorLog(const VectorRegister& X)		{ return DirectX::XMVectorLog(X); }
FORCEINLINE VectorRegister VectorLog2(const VectorRegister& X)		{ return DirectX::XMVectorLog2(X); }
FORCEINLINE VectorRegister VectorSin(const VectorRegister& X)		{ return DirectX::XMVectorSin(X); }
FORCEINLINE VectorRegister VectorCos(const VectorRegister& X)		{ return DirectX::XMVectorCos(X); }
FORCEINLINE VectorRegister VectorTan(const VectorRegister& X)		{ return DirectX::XMVectorTan(X); }
FORCEINLINE VectorRegister VectorASin(const VectorRegister& X)		{ return DirectX::XMVectorASin(X); }
FORCEINLINE VectorRegister VectorACos(const VectorRegister& X)		{ return DirectX::XMVectorACos(X); }
FORCEINLINE VectorRegister VectorATan(const VectorRegister& X)		{ return DirectX::XMVectorATan(X); }
FORCEINLINE VectorRegister VectorATan2(const VectorRegister& X, const VectorRegister& Y)	{ return DirectX::XMVectorATan2(X, Y); }
FORCEINLINE VectorRegister VectorCeil(const VectorRegister& X)		{ return DirectX::XMVectorCeiling(X); }
FORCEINLINE VectorRegister VectorFloor(const VectorRegister& X)		{ return DirectX::XMVectorFloor(X); }
FORCEINLINE VectorRegister VectorTruncate(const VectorRegister& X)	{ return DirectX::XMVectorTruncate(X); }
/**
 * Component-wise floating point remainder of X / Y.
 */
FORCEINLINE VectorRegister VectorMod( const VectorRegister& X, const VectorRegister& Y )
{
	// Truncated X / Y gives the whole quotient; subtract it back out to get the remainder.
	VectorRegister Temp = VectorTruncate(VectorDivide(X, Y));
	VectorRegister Result = VectorNegateMultiplyAdd(Y, Temp, X);
	// Clamp to [-AbsY, AbsY] to guard against precision loss with very large inputs.
	VectorRegister AbsY = VectorAbs(Y);
	return DirectX::XMVectorClamp(Result, VectorNegate(AbsY), AbsY);
}
/**
 * Per-component sign: 1.0f when the component is >= 0, otherwise -1.0f.
 */
FORCEINLINE VectorRegister VectorSign( const VectorRegister& X )
{
	return MakeVectorRegister(
		(float)(VectorGetComponent(X, 0) >= 0.0f ? 1.0f : -1.0f),
		(float)(VectorGetComponent(X, 1) >= 0.0f ? 1.0f : -1.0f),
		(float)(VectorGetComponent(X, 2) >= 0.0f ? 1.0f : -1.0f),
		(float)(VectorGetComponent(X, 3) >= 0.0f ? 1.0f : -1.0f));
}
/**
 * Per-component step: 1.0f when the component is >= 0, otherwise 0.0f.
 */
FORCEINLINE VectorRegister VectorStep( const VectorRegister& X )
{
	return MakeVectorRegister(
		(float)(VectorGetComponent(X, 0) >= 0.0f ? 1.0f : 0.0f),
		(float)(VectorGetComponent(X, 1) >= 0.0f ? 1.0f : 0.0f),
		(float)(VectorGetComponent(X, 2) >= 0.0f ? 1.0f : 0.0f),
		(float)(VectorGetComponent(X, 3) >= 0.0f ? 1.0f : 0.0f));
}
// Bitwise integer operations
#define VectorIntAnd(A, B)		_mm_and_si128(A, B)
#define VectorIntOr(A, B)		_mm_or_si128(A, B)
#define VectorIntXor(A, B)		_mm_xor_si128(A, B)
#define VectorIntAndNot(A, B)	_mm_andnot_si128(A, B)
#define VectorIntNot(A)			_mm_xor_si128(A, GlobalVectorConstants::IntAllMask)

// Integer comparisons (all-bits mask per passing component)
#define VectorIntCompareEQ(A, B)	_mm_cmpeq_epi32(A,B)
#define VectorIntCompareGT(A, B)	_mm_cmpgt_epi32(A,B)
#define VectorIntCompareLT(A, B)	_mm_cmplt_epi32(A,B)
#define VectorIntCompareGE(A, B)	VectorIntNot(VectorIntCompareLT(A,B))
#define VectorIntCompareLE(A, B)	VectorIntNot(VectorIntCompareGT(A,B))

/**
 * Selects Vec1 where the mask bits are set and Vec2 where they are clear.
 */
FORCEINLINE VectorRegister4Int VectorIntSelect(const VectorRegister4Int& Mask, const VectorRegister4Int& Vec1, const VectorRegister4Int& Vec2)
{
	return _mm_xor_si128(Vec2, _mm_and_si128(Mask, _mm_xor_si128(Vec1, Vec2)));
}

// Integer arithmetic
#define VectorIntAdd(A, B)		_mm_add_epi32(A, B)
#define VectorIntSubtract(A, B)	_mm_sub_epi32(A, B)

FORCEINLINE VectorRegister4Int VectorIntMultiply(const VectorRegister4Int& A, const VectorRegister4Int& B)
{
	// SSE2 has no 32-bit lane multiply: multiply the even and odd lanes as 64-bit products, then re-pack the low 32 bits.
	__m128i Temp0 = _mm_mul_epu32(A, B);
	__m128i Temp1 = _mm_mul_epu32(_mm_srli_si128(A, 4), _mm_srli_si128(B, 4));
	return _mm_unpacklo_epi32(_mm_shuffle_epi32(Temp0, _MM_SHUFFLE(0, 0, 2, 0)), _mm_shuffle_epi32(Temp1, _MM_SHUFFLE(0, 0, 2, 0)));
}

#define VectorIntSign(A) VectorIntSelect( VectorIntCompareGE(A, GlobalVectorConstants::IntZero), GlobalVectorConstants::IntOne, GlobalVectorConstants::IntMinusOne )

// Conversions
#define VectorIntToFloat(A) _mm_cvtepi32_ps(A)
#define VectorFloatToInt(A) _mm_cvttps_epi32(A)

/**
 * Rounds each float component to the nearest integer (half-to-even) and returns the result as 32-bit integers.
 */
FORCEINLINE VectorRegister4Int VectorRoundToIntHalfToEven(const VectorRegister4Float& Vec)
{
	return _mm_cvtps_epi32(Vec);
}
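// Illustrative example (not part of this header): a per-lane integer clamp built from the
// compare and select helpers above. Values are arbitrary.
//
//     VectorRegister4Int V     = MakeVectorRegisterInt(-3, 0, 7, 12);
//     VectorRegister4Int Limit = MakeVectorRegisterInt(8, 8, 8, 8);
//     VectorRegister4Int Mask  = VectorIntCompareGT(V, Limit);          // lanes where V > Limit
//     VectorRegister4Int R     = VectorIntSelect(Mask, Limit, V);       // min(V, Limit) per lane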
/**
 * Stores four int32s to unaligned memory.
 */
#define VectorIntStore( Vec, Ptr )			_mm_storeu_si128( (VectorRegister4Int*)(Ptr), Vec )

/**
 * Loads four int32s from unaligned memory.
 */
#define VectorIntLoad( Ptr )				_mm_loadu_si128( (VectorRegister4Int*)(Ptr) )

/**
 * Stores four int32s to 16-byte aligned memory.
 */
#define VectorIntStoreAligned( Vec, Ptr )	_mm_store_si128( (VectorRegister4Int*)(Ptr), Vec )

/**
 * Loads four int32s from 16-byte aligned memory.
 */
#define VectorIntLoadAligned( Ptr )			_mm_load_si128( (VectorRegister4Int*)(Ptr) )

/**
 * Loads one int32 and replicates it to all four components.
 */
#define VectorIntLoad1( Ptr )				_mm_shuffle_epi32(_mm_loadu_si128((VectorRegister4Int*)(Ptr)),_MM_SHUFFLE(0,0,0,0))