use crate::{f32::math, sse2::*, BVec3A, Vec2, Vec3, Vec4};

#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};

#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec3A,
}

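/// Creates a 3-dimensional vector.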
#[inline(always)]
#[must_use]
pub const fn vec3a(x: f32, y: f32, z: f32) -> Vec3A {
    Vec3A::new(x, y, z)
}

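/// A 3-dimensional vector.
///
/// A SIMD (`__m128`) register is used for storage, so this type is 16 byte aligned
/// and carries an unused fourth lane.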
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec3A(pub(crate) __m128);

impl Vec3A {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 3] = [Self::X, Self::Y, Self::Z];

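    /// Creates a new vector.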
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, z] }.v }
    }

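    /// Creates a vector with all elements set to `v`.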
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

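    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element based on `mask`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and a false
    /// element uses the element from `if_false`.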
    #[inline]
    #[must_use]
    pub fn select(mask: BVec3A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe {
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }

    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }

    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 3] {
        unsafe { *(self as *const Vec3A as *const [f32; 3]) }
    }

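    /// Creates a vector from the first 3 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.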
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        Self::new(slice[0], slice[1], slice[2])
    }

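    /// Writes the elements of `self` to the first 3 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 3 elements long.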
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        slice[0] = self.x;
        slice[1] = self.y;
        slice[2] = self.z;
    }

    #[allow(dead_code)]
    #[inline]
    #[must_use]
    pub(crate) fn from_vec4(v: Vec4) -> Self {
        Self(v.0)
    }

    #[inline]
    #[must_use]
    pub fn extend(self, w: f32) -> Vec4 {
        Vec4::new(self.x, self.y, self.z, w)
    }

    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec2 {
        use crate::swizzles::Vec3Swizzles;
        self.xy()
    }

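    /// Computes the dot product of `self` and `rhs`.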
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot3(self.0, rhs.0) }
    }

    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot3_into_m128(self.0, rhs.0) })
    }

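    /// Computes the cross product of `self` and `rhs`.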
    #[inline]
    #[must_use]
    pub fn cross(self, rhs: Self) -> Self {
        unsafe {
            // Rotate each operand to (z, x, y), multiply against the un-rotated
            // other operand, subtract, then rotate the difference back into place.
            let lhszxy = _mm_shuffle_ps(self.0, self.0, 0b01_01_00_10);
            let rhszxy = _mm_shuffle_ps(rhs.0, rhs.0, 0b01_01_00_10);
            let lhszxy_rhs = _mm_mul_ps(lhszxy, rhs.0);
            let rhszxy_lhs = _mm_mul_ps(rhszxy, self.0);
            let sub = _mm_sub_ps(lhszxy_rhs, rhszxy_lhs);
            Self(_mm_shuffle_ps(sub, sub, 0b01_01_00_10))
        }
    }

    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }

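    /// Component-wise clamping of values, similar to [`f32::clamp`].
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.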
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

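    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, z)`.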
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b01_01_10_10));
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

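    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, z)`.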
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_10_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec3A {
        BVec3A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }

    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        unsafe {
            let result = Self(_mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0));
            let mask = self.is_nan_mask();
            Self::select(mask, self, result)
        }
    }

    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        unsafe {
            let mask = Self::splat(-0.0);
            Self(_mm_or_ps(
                _mm_and_ps(rhs.0, mask.0),
                _mm_andnot_ps(mask.0, self.0),
            ))
        }
    }

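    /// Returns a bitmask with the lowest 3 bits set to the sign bits from the elements of `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit. Element `x`
    /// goes into the lowest bit, element `y` into the second, etc.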
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe { (_mm_movemask_ps(self.0) as u32) & 0x7 }
    }

    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.x.is_finite() && self.y.is_finite() && self.z.is_finite()
    }

    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec3A {
        BVec3A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }

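    /// Computes the length of `self`.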
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            let dot = dot3_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

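    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.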
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            let dot = dot3_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }

    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
        )
    }

    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
        )
    }

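    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must _not_ be of length zero, nor very close to zero.
    ///
    /// See also [`Self::try_normalize()`] and [`Self::normalize_or_zero()`].
    ///
    /// # Panics
    ///
    /// Will panic if the result is non-finite when `glam_assert` is enabled.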
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            let length = _mm_sqrt_ps(dot3_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }

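    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be `None`.
    ///
    /// See also [`Self::normalize_or_zero()`].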
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

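    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    ///
    /// See also [`Self::try_normalize()`].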
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            Self::ZERO
        }
    }

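    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of `1e-4` on the squared length.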
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 1e-4
    }

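    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.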
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { m128_round(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }

    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.floor()
    }

    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(math::exp(self.x), math::exp(self.y), math::exp(self.z))
    }

    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
        )
    }

    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }

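    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of range `[0, 1]`, the result is linearly
    /// extrapolated.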
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self + ((rhs - self) * s)
    }

    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

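    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.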
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

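    /// Fused multiply-add. Computes `(self * a) + b` element-wise, rounding once per element
    /// rather than after each operation, which yields a more accurate result.
    ///
    /// Using `mul_add` *may* be faster than an unfused multiply-add when the target has a
    /// dedicated fma instruction (the `fma` target feature); otherwise it falls back to a
    /// scalar fused multiply-add per element.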
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
        )
    }

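    /// Returns the angle (in radians) between two vectors.
    ///
    /// The inputs do not need to be unit vectors, however they must be non-zero.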
    #[inline]
    #[must_use]
    pub fn angle_between(self, rhs: Self) -> f32 {
        math::acos_approx(
            self.dot(rhs)
                .div(math::sqrt(self.length_squared().mul(rhs.length_squared()))),
        )
    }

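    /// Returns some vector that is orthogonal to the given one.
    ///
    /// The input vector must be finite and non-zero.
    ///
    /// The output vector is not necessarily unit length. For that use
    /// [`Self::any_orthonormal_vector()`] instead.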
    #[inline]
    #[must_use]
    pub fn any_orthogonal_vector(&self) -> Self {
        if math::abs(self.x) > math::abs(self.y) {
            Self::new(-self.z, 0.0, self.x) // self.cross(Self::Y)
        } else {
            Self::new(0.0, self.z, -self.y) // self.cross(Self::X)
        }
    }

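    /// Returns any unit vector that is orthogonal to the given one.
    ///
    /// The input vector must be unit length.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is enabled.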
    #[inline]
    #[must_use]
    pub fn any_orthonormal_vector(&self) -> Self {
        glam_assert!(self.is_normalized());
        // Branchless orthonormal-basis construction driven by the sign of `self.z`.
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        Self::new(b, sign + self.y * self.y * a, -self.y)
    }

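    /// Given a unit vector, returns two other vectors that together form an orthonormal
    /// basis. That is, all three vectors are orthogonal to each other and are normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is not normalized when `glam_assert` is enabled.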
    #[inline]
    #[must_use]
    pub fn any_orthonormal_pair(&self) -> (Self, Self) {
        glam_assert!(self.is_normalized());
        // Same branchless construction as `any_orthonormal_vector`, returning both basis vectors.
        let sign = math::signum(self.z);
        let a = -1.0 / (sign + self.z);
        let b = self.x * self.y * a;
        (
            Self::new(1.0 + sign * self.x * self.x * a, sign * b, -sign * self.x),
            Self::new(b, sign + self.y * self.y * a, -self.y),
        )
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec3(&self) -> crate::DVec3 {
        crate::DVec3::new(self.x as f64, self.y as f64, self.z as f64)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec3(&self) -> crate::I16Vec3 {
        crate::I16Vec3::new(self.x as i16, self.y as i16, self.z as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec3(&self) -> crate::U16Vec3 {
        crate::U16Vec3::new(self.x as u16, self.y as u16, self.z as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec3(&self) -> crate::IVec3 {
        crate::IVec3::new(self.x as i32, self.y as i32, self.z as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec3(&self) -> crate::UVec3 {
        crate::UVec3::new(self.x as u32, self.y as u32, self.z as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec3(&self) -> crate::I64Vec3 {
        crate::I64Vec3::new(self.x as i64, self.y as i64, self.z as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec3(&self) -> crate::U64Vec3 {
        crate::U64Vec3::new(self.x as u64, self.y as u64, self.z as u64)
    }
}

impl Default for Vec3A {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for Vec3A {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}

impl Div<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}

impl DivAssign<Vec3A> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
    }
}

impl Div<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl DivAssign<f32> for Vec3A {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Div<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn div(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Mul<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}

impl MulAssign<Vec3A> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
    }
}

impl Mul<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl MulAssign<f32> for Vec3A {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Mul<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn mul(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Add<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}

impl AddAssign<Vec3A> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
    }
}

impl Add<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl AddAssign<f32> for Vec3A {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Add<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn add(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Sub<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}

impl SubAssign<Vec3A> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec3A) {
        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
    }
}

impl Sub<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl SubAssign<f32> for Vec3A {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Sub<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn sub(self, rhs: Vec3A) -> Vec3A {
        Vec3A(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Rem<Vec3A> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}

impl RemAssign<Vec3A> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl Rem<f32> for Vec3A {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl RemAssign<f32> for Vec3A {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl Rem<Vec3A> for f32 {
    type Output = Vec3A;
    #[inline]
    fn rem(self, rhs: Vec3A) -> Vec3A {
        Vec3A::splat(self).rem(rhs)
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 3]> for Vec3A {
    #[inline]
    fn as_ref(&self) -> &[f32; 3] {
        unsafe { &*(self as *const Vec3A as *const [f32; 3]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 3]> for Vec3A {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 3] {
        unsafe { &mut *(self as *mut Vec3A as *mut [f32; 3]) }
    }
}

impl Sum for Vec3A {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec3A {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec3A {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec3A {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}

impl Neg for Vec3A {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
    }
}

impl Index<usize> for Vec3A {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec3A {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            _ => panic!("index out of bounds"),
        }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for Vec3A {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{}, {}, {}]", self.x, self.y, self.z)
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Debug for Vec3A {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec3A))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .finish()
    }
}

impl From<Vec3A> for __m128 {
    #[inline]
    fn from(t: Vec3A) -> Self {
        t.0
    }
}

impl From<__m128> for Vec3A {
    #[inline]
    fn from(t: __m128) -> Self {
        Self(t)
    }
}

impl From<[f32; 3]> for Vec3A {
    #[inline]
    fn from(a: [f32; 3]) -> Self {
        Self::new(a[0], a[1], a[2])
    }
}

impl From<Vec3A> for [f32; 3] {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32)> for Vec3A {
    #[inline]
    fn from(t: (f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2)
    }
}

impl From<Vec3A> for (f32, f32, f32) {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<Vec3> for Vec3A {
    #[inline]
    fn from(v: Vec3) -> Self {
        Self::new(v.x, v.y, v.z)
    }
}

impl From<Vec4> for Vec3A {
    #[inline]
    fn from(v: Vec4) -> Self {
        Self(v.0)
    }
}

impl From<Vec3A> for Vec3 {
    #[inline]
    fn from(v: Vec3A) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(Vec2, f32)> for Vec3A {
    #[inline]
    fn from((v, z): (Vec2, f32)) -> Self {
        Self::new(v.x, v.y, z)
    }
}

impl Deref for Vec3A {
    type Target = crate::deref::Vec3<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec3A {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}