/// construct from an __m128
vec4(const __m128& rhs);
/// assign an __m128
void operator=(const __m128& rhs);
/// inplace add
void operator+=(const vec4& rhs);
/// inplace sub
void operator-=(const vec4& rhs);
/// inplace component-wise multiply
void operator*=(const vec4& rhs);
/// divide by a vector component-wise
void operator/=(const vec4& rhs);
/// load content from 16-byte-aligned memory
void load(const scalar* ptr);
/// load content from unaligned memory
void loadu(const scalar* ptr);
/// write content to 16-byte-aligned memory through the write cache
void store(scalar* ptr) const;
/// write content to unaligned memory through the write cache
void storeu(scalar* ptr) const;
/// write x,y,z to 16-byte-aligned memory through the write cache
void store3(scalar* ptr) const;
/// write x,y,z to unaligned memory through the write cache
void storeu3(scalar* ptr) const;
/// stream content to 16-byte-aligned memory, circumventing the write cache
void stream(scalar* ptr) const;
/// load 3 floats into x,y,z from unaligned memory
void load_float3(const void* ptr, float w);
/// load from a normalized unsigned-byte-4 source
void load_ubyte4n(const void* ptr);
/// load from a normalized signed-byte-4 source
void load_byte4n(const void* ptr);
/// swizzle vector
template<int X, int Y, int Z, int W>
friend vec4 swizzle(const vec4& v);
/// read-write access to indexed component
scalar& operator[](const int index);
/// read-only access to indexed component
scalar operator[](const int index) const;
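A minimal usage sketch of the interface above (illustrative, not part of the header); it assumes the enclosing Math namespace and scalar = float as declared in this file:

void example_accessors()
{
    Math::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);
    Math::scalar y = v[1];    // read component 1 (2.0f)
    v[3] = 0.0f;              // write component 3 through the scalar& overload
    v += Math::vec4(1.0f);    // in-place component-wise add via the splat constructor
}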
// vec4::vec4(scalar x, scalar y, scalar z, scalar w)
this->vec = _mm_setr_ps(x, y, z, w);

// vec4::vec4(scalar v) -- splat a single scalar into all four lanes
this->vec = _mm_set1_ps(v);
// vec4::operator== -- true only when all four lanes compare equal
__m128 vTemp = _mm_cmpeq_ps(this->vec, rhs.vec);
return ((_mm_movemask_ps(vTemp) == 0x0f) != 0);

// vec4::operator!=
__m128 vTemp = _mm_cmpeq_ps(this->vec, rhs.vec);
return ((_mm_movemask_ps(vTemp) == 0x0f) == 0);
// vec4::load (16-byte-aligned source)
this->vec = _mm_load_ps(ptr);

// vec4::loadu (unaligned source)
this->vec = _mm_loadu_ps(ptr);

// vec4::store (16-byte-aligned destination)
_mm_store_ps(ptr, this->vec);

// vec4::storeu (unaligned destination)
_mm_storeu_ps(ptr, this->vec);
// vec4::store3 -- write x,y as one 64-bit store, then z separately
__m128 vv = _mm_permute_ps(this->vec, _MM_SHUFFLE(2, 2, 2, 2));
_mm_storel_epi64(reinterpret_cast<__m128i*>(ptr), _mm_castps_si128(this->vec));
_mm_store_ss(&ptr[2], vv);

// vec4::storeu3 -- write x, y, z as three scalar stores
__m128 t1 = _mm_permute_ps(this->vec, _MM_SHUFFLE(1, 1, 1, 1));
__m128 t2 = _mm_permute_ps(this->vec, _MM_SHUFFLE(2, 2, 2, 2));
_mm_store_ss(&ptr[0], this->vec);
_mm_store_ss(&ptr[1], t1);
_mm_store_ss(&ptr[2], t2);
// vec4::load_float3
// NOTE: as listed, this performs an aligned four-float load and ignores the
// 'w' argument, which is at odds with the documented intent of loading three
// floats from unaligned memory into x,y,z.
float* source = (float*)ptr;
this->vec = _mm_load_ps(source);
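A load/store sketch (illustrative): load/store require 16-byte alignment, while the u-suffixed variants accept any address; alignas(16) stands in for this codebase's NEBULA_ALIGN16 macro:

void example_load_store()
{
    alignas(16) float aligned[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
    float unaligned[4];
    float xyz[3];

    Math::vec4 v;
    v.load(aligned);       // aligned load
    v.storeu(unaligned);   // unaligned store
    v.storeu3(xyz);        // store x,y,z only
}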
// vec4::operator*= (component-wise)
this->vec = _mm_mul_ps(this->vec, rhs.vec);

// vec4::operator/=
this->vec = _mm_div_ps(this->vec, rhs.vec);

// vec4::operator+=
this->vec = _mm_add_ps(this->vec, rhs.vec);

// vec4::operator-=
this->vec = _mm_sub_ps(this->vec, rhs.vec);

// vec4::operator*= (scalar)
__m128 temp = _mm_set1_ps(s);
this->vec = _mm_mul_ps(this->vec, temp);

// vec4::set
this->vec = _mm_setr_ps(x, y, z, w);
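An in-place arithmetic sketch (illustrative); every operator works component-wise:

void example_inplace()
{
    Math::vec4 a(2.0f, 4.0f, 6.0f, 8.0f);
    Math::vec4 b(2.0f);
    a /= b;                          // (1, 2, 3, 4)
    a *= 2.0f;                       // (2, 4, 6, 8) via the scalar overload
    a.set(0.0f, 0.0f, 0.0f, 1.0f);   // reset to a homogeneous point at origin
}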
// swizzle: reorder components at compile time; _MM_SHUFFLE takes its
// arguments highest-lane-first, hence the reversed (W, Z, Y, X) order
template<int X, int Y, int Z, int W>
__forceinline vec4
swizzle(const vec4& v)
{
    return _mm_shuffle_ps(v.vec, v.vec, _MM_SHUFFLE(W, Z, Y, X));
}
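A swizzle sketch (illustrative): the template arguments name source components in x,y,z,w order:

void example_swizzle()
{
    Math::vec4 v(1.0f, 2.0f, 3.0f, 4.0f);
    Math::vec4 wzyx = Math::swizzle<3, 2, 1, 0>(v);   // (4, 3, 2, 1)
    Math::vec4 xxyy = Math::swizzle<0, 0, 1, 1>(v);   // (1, 1, 2, 2)
}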
// scalar& vec4::operator[](const int index)
return this->v[index];

// scalar vec4::operator[](const int index) const
return this->v[index];
// operator- (unary): flip the sign bit of every component with the _sign mask
return vec4(_mm_xor_ps(_mm_castsi128_ps(_sign), lhs.vec));

// operator* (vec4 * scalar)
__m128 temp = _mm_set1_ps(t);
return _mm_mul_ps(lhs.vec, temp);

// operator* (component-wise vec4 * vec4)
return _mm_mul_ps(lhs.vec, rhs.vec);

// operator+
return _mm_add_ps(lhs.vec, rhs.vec);

// operator-
return _mm_sub_ps(lhs.vec, rhs.vec);
// length: 4-component length; dp mask 0xF1 multiplies all four lanes and
// broadcasts the sum into lane 0
return _mm_cvtss_f32(_mm_sqrt_ss(_mm_dp_ps(v.vec, v.vec, 0xF1)));

// length3: 3-component length; dp mask 0x71 uses x,y,z only
return _mm_cvtss_f32(_mm_sqrt_ss(_mm_dp_ps(v.vec, v.vec, 0x71)));

// lengthsq: squared 4-component length
return _mm_cvtss_f32(_mm_dp_ps(v.vec, v.vec, 0xF1));

// lengthsq3: squared 3-component length
return _mm_cvtss_f32(_mm_dp_ps(v.vec, v.vec, 0x71));
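A length sketch (illustrative): the 3-component variants ignore w, which matters for positions stored with w = 1:

void example_length()
{
    Math::vec4 p(3.0f, 4.0f, 0.0f, 1.0f);
    Math::scalar l4 = Math::length(p);    // sqrt(9 + 16 + 0 + 1) == sqrt(26)
    Math::scalar l3 = Math::length3(p);   // sqrt(9 + 16 + 0)     == 5.0f
}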
// reciprocal: fast approximate 1/v via the rcp instruction
return _mm_rcp_ps(v.vec);

// multiply (component-wise)
return _mm_mul_ps(v0.vec, v1.vec);

// multiplyadd: v0 * v1 + v2, fused when FMA is available; the preprocessor
// guard between the two returns is elided in the listing
return _mm_fmadd_ps(v0.vec, v1.vec, v2.vec);
return _mm_add_ps(_mm_mul_ps(v0.vec, v1.vec), v2.vec);

// divide (component-wise)
return _mm_div_ps(v0.vec, v1.vec);
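A multiplyadd sketch (illustrative): r = v0 * v1 + v2 per component, in a single fused instruction when FMA is compiled in:

void example_multiplyadd()
{
    Math::vec4 v0(1.0f, 2.0f, 3.0f, 4.0f);
    Math::vec4 v1(10.0f);
    Math::vec4 v2(0.5f);
    Math::vec4 r = Math::multiplyadd(v0, v1, v2);   // (10.5, 20.5, 30.5, 40.5)
}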
// abs: clear each component's sign bit
unsigned int val = 0x7fffffff;
__m128 temp = _mm_set1_ps(*(float*)&val);
return _mm_and_ps(v.vec, temp);
// cross3: 3-component cross product; the shuffles rotate the inputs into
// (y,z,x) and (z,x,y) order so the multiply/subtract yields the determinant form
__m128 tmp0, tmp1, tmp2, tmp3, result;
tmp0 = _mm_shuffle_ps(v0.vec, v0.vec, _MM_SHUFFLE(3, 0, 2, 1));
tmp1 = _mm_shuffle_ps(v1.vec, v1.vec, _MM_SHUFFLE(3, 1, 0, 2));
tmp2 = _mm_shuffle_ps(v0.vec, v0.vec, _MM_SHUFFLE(3, 1, 0, 2));
tmp3 = _mm_shuffle_ps(v1.vec, v1.vec, _MM_SHUFFLE(3, 0, 2, 1));
result = _mm_mul_ps(tmp0, tmp1);
result = _mm_sub_ps(result, _mm_mul_ps(tmp2, tmp3));
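A cross3 sketch (illustrative); the w lane always comes out zero because both products carry the same w0*w1 term:

void example_cross3()
{
    Math::vec4 x(1.0f, 0.0f, 0.0f, 0.0f);
    Math::vec4 y(0.0f, 1.0f, 0.0f, 0.0f);
    Math::vec4 z = Math::cross3(x, y);   // (0, 0, 1, 0)
}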
// dot: 4-component dot product
return _mm_cvtss_f32(_mm_dp_ps(v0.vec, v1.vec, 0xF1));

// dot3: 3-component dot product
return _mm_cvtss_f32(_mm_dp_ps(v0.vec, v1.vec, 0x71));
// barycentric: evaluates v0 + f * (v1 - v0) + g * (v2 - v0)
__m128 R1 = _mm_sub_ps(v1.vec, v0.vec);
__m128 SF = _mm_set_ps1(f);
__m128 R2 = _mm_sub_ps(v2.vec, v0.vec);
__m128 SG = _mm_set_ps1(g);
R1 = _mm_mul_ps(R1, SF);
R2 = _mm_mul_ps(R2, SG);
R1 = _mm_add_ps(R1, v0.vec);
R1 = _mm_add_ps(R1, R2);
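A barycentric sketch (illustrative): (f, g) = (0, 0) yields v0, (1, 0) yields v1, and (0, 1) yields v2:

void example_barycentric(const Math::vec4& a, const Math::vec4& b, const Math::vec4& c)
{
    // centroid of the triangle spanned by a, b, c
    Math::vec4 center = Math::barycentric(a, b, c, 1.0f / 3.0f, 1.0f / 3.0f);
}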
// catmullrom: Catmull-Rom basis weights for parameter s (s2 = s*s, s3 = s2*s)
__m128 P0 = _mm_set_ps1((-s3 + 2.0f * s2 - s) * 0.5f);
__m128 P1 = _mm_set_ps1((3.0f * s3 - 5.0f * s2 + 2.0f) * 0.5f);
__m128 P2 = _mm_set_ps1((-3.0f * s3 + 4.0f * s2 + s) * 0.5f);
__m128 P3 = _mm_set_ps1((s3 - s2) * 0.5f);

P0 = _mm_mul_ps(P0, v0.vec);
P1 = _mm_mul_ps(P1, v1.vec);
P2 = _mm_mul_ps(P2, v2.vec);
P3 = _mm_mul_ps(P3, v3.vec);
P0 = _mm_add_ps(P0, P1);
P2 = _mm_add_ps(P2, P3);
P0 = _mm_add_ps(P0, P2);
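A Catmull-Rom sanity check (illustrative): at s = 0 the basis weights above are (0, 1, 0, 0) and at s = 1 they are (0, 0, 1, 0), so the curve interpolates from v1 to v2 while v0 and v3 only shape the tangents:

void example_catmullrom(const Math::vec4& v0, const Math::vec4& v1,
                        const Math::vec4& v2, const Math::vec4& v3)
{
    Math::vec4 start = Math::catmullrom(v0, v1, v2, v3, 0.0f);   // == v1
    Math::vec4 end   = Math::catmullrom(v0, v1, v2, v3, 1.0f);   // == v2
}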
// hermite: cubic Hermite basis weights for parameter s (s2 = s*s, s3 = s2*s)
__m128 P0 = _mm_set_ps1(2.0f * s3 - 3.0f * s2 + 1.0f);
__m128 T0 = _mm_set_ps1(s3 - 2.0f * s2 + s);
__m128 P1 = _mm_set_ps1(-2.0f * s3 + 3.0f * s2);
__m128 T1 = _mm_set_ps1(s3 - s2);

__m128 vResult = _mm_mul_ps(P0, v1.vec);
__m128 vTemp = _mm_mul_ps(T0, t1.vec);
vResult = _mm_add_ps(vResult, vTemp);
vTemp = _mm_mul_ps(P1, v2.vec);
vResult = _mm_add_ps(vResult, vTemp);
vTemp = _mm_mul_ps(T1, t2.vec);
vResult = _mm_add_ps(vResult, vTemp);
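A Hermite sanity check (illustrative): at s = 0 the weights (P0, T0, P1, T1) are (1, 0, 0, 0) and at s = 1 they are (0, 0, 1, 0), so the curve runs from v1 to v2 with tangents t1 and t2:

void example_hermite(const Math::vec4& v1, const Math::vec4& t1,
                     const Math::vec4& v2, const Math::vec4& t2)
{
    Math::vec4 start = Math::hermite(v1, t1, v2, t2, 0.0f);   // == v1
    Math::vec4 end   = Math::hermite(v1, t1, v2, t2, 1.0f);   // == v2
}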
// angle: angle between v0 and v1 (acos of the normalized dot product); several
// statements, including the computation of 'dot' itself, are elided in this listing
__m128 l0 = _mm_mul_ps(v0.vec, v0.vec);
l0 = _mm_add_ps(_mm_shuffle_ps(l0, l0, _MM_SHUFFLE(0, 0, 0, 0)),
     _mm_add_ps(_mm_shuffle_ps(l0, l0, _MM_SHUFFLE(1, 1, 1, 1)),
                _mm_shuffle_ps(l0, l0, _MM_SHUFFLE(2, 2, 2, 2))));
__m128 l1 = _mm_mul_ps(v1.vec, v1.vec);
l1 = _mm_add_ps(_mm_shuffle_ps(l1, l1, _MM_SHUFFLE(0, 0, 0, 0)),
     _mm_add_ps(_mm_shuffle_ps(l1, l1, _MM_SHUFFLE(1, 1, 1, 1)),
                _mm_shuffle_ps(l1, l1, _MM_SHUFFLE(2, 2, 2, 2))));
__m128 l = _mm_shuffle_ps(l0, l1, _MM_SHUFFLE(0, 0, 0, 0));
l = _mm_mul_ss(_mm_shuffle_ps(l, l, _MM_SHUFFLE(0, 0, 0, 0)),
               _mm_shuffle_ps(l, l, _MM_SHUFFLE(1, 1, 1, 1)));
// ...
dot = _mm_add_ps(_mm_shuffle_ps(dot, dot, _MM_SHUFFLE(0, 0, 0, 0)),
      _mm_add_ps(_mm_shuffle_ps(dot, dot, _MM_SHUFFLE(1, 1, 1, 1)),
      _mm_add_ps(_mm_shuffle_ps(dot, dot, _MM_SHUFFLE(2, 2, 2, 2)),
                 _mm_shuffle_ps(dot, dot, _MM_SHUFFLE(3, 3, 3, 3)))));
// ...
_mm_store_ss(&cangle, dot);
// lerp: v0 + s * (v1 - v0)
return v0 + ((v1 - v0) * s);

// maximize: component-wise maximum
return _mm_max_ps(v0.vec, v1.vec);

// minimize: component-wise minimum
return _mm_min_ps(v0.vec, v1.vec);

// clamp: bound each component of 'clamp' to [min, max]
__m128 temp = _mm_max_ps(min.vec, clamp.vec);
temp = _mm_min_ps(temp, max.vec);
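A clamp sketch (illustrative), assuming the (value, min, max) argument order used by the scalar clamp in scalar.h:

void example_clamp()
{
    Math::vec4 value(-1.0f, 0.5f, 2.0f, 10.0f);
    Math::vec4 lo(0.0f);
    Math::vec4 hi(1.0f);
    Math::vec4 c = Math::clamp(value, lo, hi);   // (0, 0.5, 1, 1)
}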
// normalize (4-component); guard against the zero vector
if (v == vec4(0)) return v;
return _mm_div_ps(v.vec, _mm_sqrt_ps(_mm_dp_ps(v.vec, v.vec, 0xFF)));

// normalizeapprox: reciprocal-sqrt approximation
if (v == vec4(0)) return v;
return _mm_mul_ps(v.vec, _mm_rsqrt_ps(_mm_dp_ps(v.vec, v.vec, 0xFF)));

// normalize3: normalize x,y,z (dp mask 0x77), then re-insert the original w
if (v == vec4(0)) return v;
__m128 t = _mm_div_ps(v.vec, _mm_sqrt_ps(_mm_dp_ps(v.vec, v.vec, 0x77)));
return _mm_insert_ps(t, v.vec, 0xF0);

// normalize3approx
if (v == vec4(0)) return v;
__m128 t = _mm_mul_ps(v.vec, _mm_rsqrt_ps(_mm_dp_ps(v.vec, v.vec, 0x77)));
return _mm_insert_ps(t, v.vec, 0xF0);
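A normalize sketch (illustrative): the zero-vector guard returns the input unchanged instead of dividing by zero, and normalize3 preserves w:

void example_normalize()
{
    Math::vec4 p(3.0f, 0.0f, 4.0f, 1.0f);
    Math::vec4 n3 = Math::normalize3(p);                 // (0.6, 0, 0.8, 1)
    Math::vec4 z  = Math::normalize(Math::vec4(0.0f));   // stays all zero
}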
// reflect: incident - 2 * dot3(incident, normal) * normal
__m128 res = _mm_mul_ps(incident.vec, normal.vec);
res = _mm_add_ps(_mm_shuffle_ps(res, res, _MM_SHUFFLE(0, 0, 0, 0)),
      _mm_add_ps(_mm_shuffle_ps(res, res, _MM_SHUFFLE(1, 1, 1, 1)),
                 _mm_shuffle_ps(res, res, _MM_SHUFFLE(2, 2, 2, 2))));
res = _mm_add_ps(res, res);
res = _mm_mul_ps(res, normal.vec);
res = _mm_sub_ps(incident.vec, res);
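An equivalent reference for the body above, written against this header's own operators (illustrative; assumes a unit-length normal):

Math::vec4 reflect_reference(const Math::vec4& incident, const Math::vec4& normal)
{
    Math::scalar d = Math::dot3(incident, normal);
    return incident - (normal * (2.0f * d));
}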
// perspective_div: multiply x,y,z,w by the reciprocal of w
__m128 d = _mm_set_ps1(1.0f / v.w);
return _mm_mul_ps(v.vec, d);
// 4-component comparison predicates. The listing repeats the same two-line
// body for the less/lessequal/greater/greaterequal/equal families (any/all
// variants); only the elided return statements, which test the movemask
// result, differ. less* and greaterequal* compare with cmpge, lessequal*
// and greater* with cmpgt, equal* with cmpeq:
__m128 vTemp = _mm_cmpge_ps(v0.vec, v1.vec);   // less_any/all, greaterequal_any/all
int res = _mm_movemask_ps(vTemp);

__m128 vTemp = _mm_cmpgt_ps(v0.vec, v1.vec);   // lessequal_any/all, greater_any/all
int res = _mm_movemask_ps(vTemp);

__m128 vTemp = _mm_cmpeq_ps(v0.vec, v1.vec);   // equal_any
int res = _mm_movemask_ps(vTemp);
// nearequal (scalar epsilon): true when |v0 - v1| <= epsilon in all four lanes
__m128 eps = _mm_set1_ps(epsilon);
__m128 delta = _mm_sub_ps(v0.vec, v1.vec);
__m128 temp = _mm_setzero_ps();
temp = _mm_sub_ps(temp, delta);
temp = _mm_max_ps(temp, delta);      // temp = abs(delta)
temp = _mm_cmple_ps(temp, eps);
return (_mm_movemask_ps(temp) == 0xf) != 0;

// nearequal (vec4 epsilon): per-component tolerance
__m128 delta = _mm_sub_ps(v0.vec, v1.vec);
__m128 temp = _mm_setzero_ps();
temp = _mm_sub_ps(temp, delta);
temp = _mm_max_ps(temp, delta);
temp = _mm_cmple_ps(temp, epsilon.vec);
return (_mm_movemask_ps(temp) == 0xf) != 0;
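A nearequal sketch (illustrative): tolerant comparison for accumulated floating-point error, in contrast to the exact lane-wise operator==:

void example_nearequal()
{
    Math::vec4 a(1.0f, 2.0f, 3.0f, 4.0f);
    Math::vec4 b(1.0000001f, 2.0f, 3.0f, 4.0f);
    bool close = Math::nearequal(a, b, 0.001f);   // true
    bool exact = (a == b);                        // false
}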
// less / greater / equal: component-wise predicates returning 1.0f or 0.0f
// per lane; min against the _plus1 constant converts the all-ones compare
// mask (a NaN bit pattern) into 1.0f:
return _mm_min_ps(_mm_cmplt_ps(v0.vec, v1.vec), _plus1);   // less
return _mm_min_ps(_mm_cmpgt_ps(v0.vec, v1.vec), _plus1);   // greater
return _mm_min_ps(_mm_cmpeq_ps(v0.vec, v1.vec), _plus1);   // equal
// 3-component comparison predicates (less3_any/all, lessequal3_any/all,
// greater3_any/all, greaterequal3_any/all, equal3_any/all). Same pattern as
// the 4-component family, but the movemask is masked to the lower three bits;
// the return statements are elided in the listing:
__m128 vTemp = _mm_cmpge_ps(v0.vec, v1.vec);   // less3_*, greaterequal3_*
int res = _mm_movemask_ps(vTemp) & 7;

__m128 vTemp = _mm_cmpgt_ps(v0.vec, v1.vec);   // lessequal3_*, greater3_*
int res = _mm_movemask_ps(vTemp) & 7;

__m128 vTemp = _mm_cmpeq_ps(v0.vec, v1.vec);   // equal3_any, equal3_all
int res = _mm_movemask_ps(vTemp) & 7;
// nearequal3: compare only x,y,z against a vec4 epsilon
__m128 delta = _mm_sub_ps(v0.vec, v1.vec);
__m128 temp = _mm_setzero_ps();
temp = _mm_sub_ps(temp, delta);
temp = _mm_max_ps(temp, delta);
temp = _mm_cmple_ps(temp, epsilon.vec);
return (_mm_movemask_ps(temp) == 0x7) != 0;
// splat: broadcast the indexed component across all four lanes; the dispatch
// on 'element' around the four returns is elided in the listing
n_assert(element < 4 && element >= 0);
return _mm_shuffle_ps(v.vec, v.vec, _MM_SHUFFLE(0, 0, 0, 0));   // element == 0
return _mm_shuffle_ps(v.vec, v.vec, _MM_SHUFFLE(1, 1, 1, 1));   // element == 1
return _mm_shuffle_ps(v.vec, v.vec, _MM_SHUFFLE(2, 2, 2, 2));   // element == 2
return _mm_shuffle_ps(v.vec, v.vec, _MM_SHUFFLE(3, 3, 3, 3));   // element == 3

// splat_x / splat_y / splat_z / splat_w: fixed-component broadcasts
return _mm_shuffle_ps(v.vec, v.vec, _MM_SHUFFLE(0, 0, 0, 0));   // splat_x
return _mm_shuffle_ps(v.vec, v.vec, _MM_SHUFFLE(1, 1, 1, 1));   // splat_y
return _mm_shuffle_ps(v.vec, v.vec, _MM_SHUFFLE(2, 2, 2, 2));   // splat_z
return _mm_shuffle_ps(v.vec, v.vec, _MM_SHUFFLE(3, 3, 3, 3));   // splat_w
// permute: build a vector from components of v0 (indices 0..3) and v1 (indices 4..7)
permute(const vec4& v0, const vec4& v1,
        unsigned int i0, unsigned int i1, unsigned int i2, unsigned int i3)
{
    static __m128i three = _mm_set_epi32(3, 3, 3, 3);
    // (the NEBULA_ALIGN16 staging array 'elem' holding i0..i3 is elided in the listing)
    __m128i vControl = _mm_load_si128(reinterpret_cast<const __m128i*>(&elem[0]));
    __m128i vSelect = _mm_cmpgt_epi32(vControl, three);
    vControl = _mm_and_si128(vControl, three);
    __m128 shuffled1 = _mm_permutevar_ps(v0.vec, vControl);
    __m128 shuffled2 = _mm_permutevar_ps(v1.vec, vControl);
    __m128 masked1 = _mm_andnot_ps(_mm_castsi128_ps(vSelect), shuffled1);
    __m128 masked2 = _mm_and_ps(_mm_castsi128_ps(vSelect), shuffled2);
    return _mm_or_ps(masked1, masked2);
}
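A permute sketch (illustrative): indices 0-3 select from v0 and 4-7 from v1, which is what the PERMUTE_0X..PERMUTE_1W constants encode:

void example_permute()
{
    Math::vec4 a(1.0f, 2.0f, 3.0f, 4.0f);
    Math::vec4 b(5.0f, 6.0f, 7.0f, 8.0f);
    Math::vec4 r = Math::permute(a, b, 0, 4, 1, 5);   // (1, 5, 2, 6)
}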
// select (by indices): forwards to permute
return permute(v0, v1, i0, i1, i2, i3);

// select (by component mask): pick v1 lanes where control bits are set, else v0
__m128 v0masked = _mm_andnot_ps(control.vec, v0.vec);
__m128 v1masked = _mm_and_ps(v1.vec, control.vec);
return _mm_or_ps(v0masked, v1masked);
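A select-by-mask sketch (illustrative; the (v0, v1, control) argument order is assumed from the parameter names in the body above). Each control lane should be all-ones or all-zeros, as produced by the SSE compare intrinsics:

void example_select(const Math::vec4& v0, const Math::vec4& v1)
{
    // lanes where v0 < v1 pick v1, others pick v0 -- a component-wise max
    Math::vec4 mask(_mm_cmplt_ps(v0.vec, v1.vec));
    Math::vec4 larger = Math::select(v0, v1, mask);
}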
// floor: component-wise round toward negative infinity
return _mm_floor_ps(v.vec);

// ceil: component-wise round toward positive infinity
return _mm_ceil_ps(v.vec);