use crate::{f32::math, sse2::*, BVec4A, Vec2, Vec3, Vec3A};

#[cfg(not(target_arch = "spirv"))]
use core::fmt;
use core::iter::{Product, Sum};
use core::{f32, ops::*};

#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;

/// Allows constructing a `Vec4` from an `[f32; 4]` in a `const` context, since
/// `__m128` values cannot be created directly in `const fn`s.
#[repr(C)]
union UnionCast {
    a: [f32; 4],
    v: Vec4,
}

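/// Creates a 4-dimensional vector.
///
/// # Example
///
/// Illustrative doc-test; this and the examples below assume these items are
/// re-exported from the `glam` crate root.
///
/// ```
/// # use glam::{vec4, Vec4};
/// assert_eq!(vec4(1.0, 2.0, 3.0, 4.0), Vec4::new(1.0, 2.0, 3.0, 4.0));
/// ```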
#[inline(always)]
#[must_use]
pub const fn vec4(x: f32, y: f32, z: f32, w: f32) -> Vec4 {
    Vec4::new(x, y, z, w)
}

/// A 4-dimensional vector.
///
/// SIMD vector types are used for storage on supported platforms; this version
/// is backed by an SSE2 `__m128`.
///
/// This type is 16 byte aligned.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Vec4(pub(crate) __m128);

impl Vec4 {
    /// All zeroes.
    pub const ZERO: Self = Self::splat(0.0);

    /// All ones.
    pub const ONE: Self = Self::splat(1.0);

    /// All negative ones.
    pub const NEG_ONE: Self = Self::splat(-1.0);

    /// All `f32::MIN`.
    pub const MIN: Self = Self::splat(f32::MIN);

    /// All `f32::MAX`.
    pub const MAX: Self = Self::splat(f32::MAX);

    /// All `f32::NAN`.
    pub const NAN: Self = Self::splat(f32::NAN);

    /// All `f32::INFINITY`.
    pub const INFINITY: Self = Self::splat(f32::INFINITY);

    /// All `f32::NEG_INFINITY`.
    pub const NEG_INFINITY: Self = Self::splat(f32::NEG_INFINITY);

    /// A unit vector pointing along the positive X axis.
    pub const X: Self = Self::new(1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Y axis.
    pub const Y: Self = Self::new(0.0, 1.0, 0.0, 0.0);

    /// A unit vector pointing along the positive Z axis.
    pub const Z: Self = Self::new(0.0, 0.0, 1.0, 0.0);

    /// A unit vector pointing along the positive W axis.
    pub const W: Self = Self::new(0.0, 0.0, 0.0, 1.0);

    /// A unit vector pointing along the negative X axis.
    pub const NEG_X: Self = Self::new(-1.0, 0.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Y axis.
    pub const NEG_Y: Self = Self::new(0.0, -1.0, 0.0, 0.0);

    /// A unit vector pointing along the negative Z axis.
    pub const NEG_Z: Self = Self::new(0.0, 0.0, -1.0, 0.0);

    /// A unit vector pointing along the negative W axis.
    pub const NEG_W: Self = Self::new(0.0, 0.0, 0.0, -1.0);

    /// The unit axes.
    pub const AXES: [Self; 4] = [Self::X, Self::Y, Self::Z, Self::W];

    /// Creates a new vector.
    #[inline(always)]
    #[must_use]
    pub const fn new(x: f32, y: f32, z: f32, w: f32) -> Self {
        unsafe { UnionCast { a: [x, y, z, w] }.v }
    }

    /// Creates a vector with all elements set to `v`.
    #[inline]
    #[must_use]
    pub const fn splat(v: f32) -> Self {
        unsafe { UnionCast { a: [v; 4] }.v }
    }

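    /// Creates a vector from the elements in `if_true` and `if_false`, selecting which to use
    /// for each element of `self`.
    ///
    /// A true element in the mask uses the corresponding element from `if_true`, and a false
    /// element uses the element from `if_false`.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::{BVec4A, Vec4};
    /// let mask = BVec4A::new(true, false, true, false);
    /// let a = Vec4::new(1.0, 2.0, 3.0, 4.0);
    /// let b = Vec4::new(5.0, 6.0, 7.0, 8.0);
    /// assert_eq!(Vec4::select(mask, a, b), Vec4::new(1.0, 6.0, 3.0, 8.0));
    /// ```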
    #[inline]
    #[must_use]
    pub fn select(mask: BVec4A, if_true: Self, if_false: Self) -> Self {
        Self(unsafe {
            _mm_or_ps(
                _mm_andnot_ps(mask.0, if_false.0),
                _mm_and_ps(if_true.0, mask.0),
            )
        })
    }

    /// Creates a new vector from an array.
    #[inline]
    #[must_use]
    pub const fn from_array(a: [f32; 4]) -> Self {
        Self::new(a[0], a[1], a[2], a[3])
    }

    /// `[x, y, z, w]`
    #[inline]
    #[must_use]
    pub const fn to_array(&self) -> [f32; 4] {
        unsafe { *(self as *const Vec4 as *const [f32; 4]) }
    }

    /// Creates a vector from the first 4 values in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    #[must_use]
    pub const fn from_slice(slice: &[f32]) -> Self {
        Self::new(slice[0], slice[1], slice[2], slice[3])
    }

    /// Writes the elements of `self` to the first 4 elements in `slice`.
    ///
    /// # Panics
    ///
    /// Panics if `slice` is less than 4 elements long.
    #[inline]
    pub fn write_to_slice(self, slice: &mut [f32]) {
        assert!(slice.len() >= 4);
        unsafe {
            _mm_storeu_ps(slice.as_mut_ptr(), self.0);
        }
    }

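    /// Creates a 3D vector from the `x`, `y` and `z` elements of `self`, discarding `w`.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::{Vec3, Vec4};
    /// let v = Vec4::new(1.0, 2.0, 3.0, 4.0);
    /// assert_eq!(v.truncate(), Vec3::new(1.0, 2.0, 3.0));
    /// ```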
    #[inline]
    #[must_use]
    pub fn truncate(self) -> Vec3 {
        use crate::swizzles::Vec4Swizzles;
        self.xyz()
    }

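    /// Computes the dot product of `self` and `rhs`.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::Vec4;
    /// let a = Vec4::new(1.0, 2.0, 3.0, 4.0);
    /// let b = Vec4::new(5.0, 6.0, 7.0, 8.0);
    /// assert_eq!(a.dot(b), 70.0);
    /// ```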
    #[inline]
    #[must_use]
    pub fn dot(self, rhs: Self) -> f32 {
        unsafe { dot4(self.0, rhs.0) }
    }

    /// Returns a vector where every component is the dot product of `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn dot_into_vec(self, rhs: Self) -> Self {
        Self(unsafe { dot4_into_m128(self.0, rhs.0) })
    }

    /// Returns a vector containing the minimum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.min(rhs.x), self.y.min(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn min(self, rhs: Self) -> Self {
        Self(unsafe { _mm_min_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the maximum values for each element of `self` and `rhs`.
    ///
    /// In other words this computes `[self.x.max(rhs.x), self.y.max(rhs.y), ..]`.
    #[inline]
    #[must_use]
    pub fn max(self, rhs: Self) -> Self {
        Self(unsafe { _mm_max_ps(self.0, rhs.0) })
    }

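    /// Component-wise clamping of values.
    ///
    /// Each element in `min` must be less-or-equal to the corresponding element in `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(-1.0, 0.5, 2.0, 5.0);
    /// let clamped = v.clamp(Vec4::splat(0.0), Vec4::splat(1.0));
    /// assert_eq!(clamped, Vec4::new(0.0, 0.5, 1.0, 1.0));
    /// ```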
    #[inline]
    #[must_use]
    pub fn clamp(self, min: Self, max: Self) -> Self {
        glam_assert!(min.cmple(max).all(), "clamp: expected min <= max");
        self.max(min).min(max)
    }

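    /// Returns the horizontal minimum of `self`.
    ///
    /// In other words this computes `min(x, y, z, w)`.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::Vec4;
    /// assert_eq!(Vec4::new(3.0, 1.0, 4.0, 2.0).min_element(), 1.0);
    /// ```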
    #[inline]
    #[must_use]
    pub fn min_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_min_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns the horizontal maximum of `self`.
    ///
    /// In other words this computes `max(x, y, z, w)`.
    #[inline]
    #[must_use]
    pub fn max_element(self) -> f32 {
        unsafe {
            let v = self.0;
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_11_10));
            let v = _mm_max_ps(v, _mm_shuffle_ps(v, v, 0b00_00_00_01));
            _mm_cvtss_f32(v)
        }
    }

    /// Returns a vector mask containing the result of a `==` comparison for each element of
    /// `self` and `rhs`.
    ///
    /// In other words, this computes `[self.x == rhs.x, self.y == rhs.y, ..]` for all elements.
    #[inline]
    #[must_use]
    pub fn cmpeq(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpeq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `!=` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpne(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpneq_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>=` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpge(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpge_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `>` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmpgt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpgt_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<=` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmple(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmple_ps(self.0, rhs.0) })
    }

    /// Returns a vector mask containing the result of a `<` comparison for each element of
    /// `self` and `rhs`.
    #[inline]
    #[must_use]
    pub fn cmplt(self, rhs: Self) -> BVec4A {
        BVec4A(unsafe { _mm_cmplt_ps(self.0, rhs.0) })
    }

    /// Returns a vector containing the absolute value of each element of `self`.
    #[inline]
    #[must_use]
    pub fn abs(self) -> Self {
        Self(unsafe { crate::sse2::m128_abs(self.0) })
    }

    /// Returns a vector with elements representing the sign of `self`.
    ///
    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
    /// - `NAN` if the number is `NAN`
    #[inline]
    #[must_use]
    pub fn signum(self) -> Self {
        unsafe {
            // ORing the sign bit of `self` into `1.0` yields `±1.0`; NaN lanes are
            // patched back in via the select below.
            let result = Self(_mm_or_ps(_mm_and_ps(self.0, Self::NEG_ONE.0), Self::ONE.0));
            let mask = self.is_nan_mask();
            Self::select(mask, self, result)
        }
    }

    /// Returns a vector with signs of `rhs` and the magnitudes of `self`.
    #[inline]
    #[must_use]
    pub fn copysign(self, rhs: Self) -> Self {
        unsafe {
            // `-0.0` is a mask of just the sign bit: take the sign from `rhs`
            // and everything else from `self`.
            let mask = Self::splat(-0.0);
            Self(_mm_or_ps(
                _mm_and_ps(rhs.0, mask.0),
                _mm_andnot_ps(mask.0, self.0),
            ))
        }
    }

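    /// Returns a bitmask with the lowest 4 bits set to the sign bits from the elements of
    /// `self`.
    ///
    /// A negative element results in a `1` bit and a positive element in a `0` bit. Element
    /// `x` goes into the first lowest bit, element `y` into the second, etc.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(-1.0, 2.0, -3.0, 4.0);
    /// assert_eq!(v.is_negative_bitmask(), 0b0101);
    /// ```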
    #[inline]
    #[must_use]
    pub fn is_negative_bitmask(self) -> u32 {
        unsafe { _mm_movemask_ps(self.0) as u32 }
    }

    /// Returns `true` if, and only if, all elements are finite.
    ///
    /// If any element is either `NaN`, positive or negative infinity, this will return `false`.
    #[inline]
    #[must_use]
    pub fn is_finite(self) -> bool {
        self.x.is_finite() && self.y.is_finite() && self.z.is_finite() && self.w.is_finite()
    }

    /// Returns `true` if any elements are `NaN`.
    #[inline]
    #[must_use]
    pub fn is_nan(self) -> bool {
        self.is_nan_mask().any()
    }

    /// Performs `is_nan` on each element of `self`, returning a vector mask of the results.
    ///
    /// In other words, this computes `[x.is_nan(), y.is_nan(), z.is_nan(), w.is_nan()]`.
    #[inline]
    #[must_use]
    pub fn is_nan_mask(self) -> BVec4A {
        BVec4A(unsafe { _mm_cmpunord_ps(self.0, self.0) })
    }

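    /// Computes the length of `self`.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(1.0, 2.0, 2.0, 4.0);
    /// assert_eq!(v.length(), 5.0);
    /// ```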
    #[doc(alias = "magnitude")]
    #[inline]
    #[must_use]
    pub fn length(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_sqrt_ps(dot))
        }
    }

    /// Computes the squared length of `self`.
    ///
    /// This is faster than `length()` as it avoids a square root operation.
    #[doc(alias = "magnitude2")]
    #[inline]
    #[must_use]
    pub fn length_squared(self) -> f32 {
        self.dot(self)
    }

    /// Computes `1.0 / length()`.
    ///
    /// For valid results, `self` must _not_ be of length zero.
    #[inline]
    #[must_use]
    pub fn length_recip(self) -> f32 {
        unsafe {
            let dot = dot4_in_x(self.0, self.0);
            _mm_cvtss_f32(_mm_div_ps(Self::ONE.0, _mm_sqrt_ps(dot)))
        }
    }

    /// Computes the Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance(self, rhs: Self) -> f32 {
        (self - rhs).length()
    }

    /// Computes the squared Euclidean distance between two points in space.
    #[inline]
    #[must_use]
    pub fn distance_squared(self, rhs: Self) -> f32 {
        (self - rhs).length_squared()
    }

    /// Returns the element-wise quotient of the Euclidean division of `self` by `rhs`.
    #[inline]
    #[must_use]
    pub fn div_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::div_euclid(self.x, rhs.x),
            math::div_euclid(self.y, rhs.y),
            math::div_euclid(self.z, rhs.z),
            math::div_euclid(self.w, rhs.w),
        )
    }

    /// Returns the element-wise remainder of the Euclidean division of `self` by `rhs`.
    #[inline]
    #[must_use]
    pub fn rem_euclid(self, rhs: Self) -> Self {
        Self::new(
            math::rem_euclid(self.x, rhs.x),
            math::rem_euclid(self.y, rhs.y),
            math::rem_euclid(self.z, rhs.z),
            math::rem_euclid(self.w, rhs.w),
        )
    }

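    /// Returns `self` normalized to length 1.0.
    ///
    /// For valid results, `self` must _not_ be of length zero, nor very close to zero.
    ///
    /// # Panics
    ///
    /// Will panic if `self` is zero length when `glam_assert` is enabled.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(2.0, 0.0, 0.0, 0.0);
    /// assert_eq!(v.normalize(), Vec4::X);
    /// ```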
    #[inline]
    #[must_use]
    pub fn normalize(self) -> Self {
        unsafe {
            let length = _mm_sqrt_ps(dot4_into_m128(self.0, self.0));
            #[allow(clippy::let_and_return)]
            let normalized = Self(_mm_div_ps(self.0, length));
            glam_assert!(normalized.is_finite());
            normalized
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns `None`.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be `None`.
    #[inline]
    #[must_use]
    pub fn try_normalize(self) -> Option<Self> {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            Some(self * rcp)
        } else {
            None
        }
    }

    /// Returns `self` normalized to length 1.0 if possible, else returns zero.
    ///
    /// In particular, if the input is zero (or very close to zero), or non-finite,
    /// the result of this operation will be zero.
    #[inline]
    #[must_use]
    pub fn normalize_or_zero(self) -> Self {
        let rcp = self.length_recip();
        if rcp.is_finite() && rcp > 0.0 {
            self * rcp
        } else {
            Self::ZERO
        }
    }

    /// Returns whether `self` is length `1.0` or not.
    ///
    /// Uses a precision threshold of `1e-4`.
    #[inline]
    #[must_use]
    pub fn is_normalized(self) -> bool {
        math::abs(self.length_squared() - 1.0) <= 1e-4
    }

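    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is zero length when `glam_assert` is enabled.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::new(1.0, 2.0, 0.0, 0.0);
    /// assert_eq!(v.project_onto(Vec4::X), Vec4::new(1.0, 0.0, 0.0, 0.0));
    /// ```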
    #[inline]
    #[must_use]
    pub fn project_onto(self, rhs: Self) -> Self {
        let other_len_sq_rcp = rhs.dot(rhs).recip();
        glam_assert!(other_len_sq_rcp.is_finite());
        rhs * self.dot(rhs) * other_len_sq_rcp
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto(rhs)`.
    ///
    /// `rhs` must be of non-zero length.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` has a length of zero when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reject_from(self, rhs: Self) -> Self {
        self - self.project_onto(rhs)
    }

    /// Returns the vector projection of `self` onto `rhs`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn project_onto_normalized(self, rhs: Self) -> Self {
        glam_assert!(rhs.is_normalized());
        rhs * self.dot(rhs)
    }

    /// Returns the vector rejection of `self` from `rhs`.
    ///
    /// The vector rejection is the vector perpendicular to the projection of `self` onto
    /// `rhs`, in other words the result of `self - self.project_onto_normalized(rhs)`.
    ///
    /// `rhs` must be normalized.
    ///
    /// # Panics
    ///
    /// Will panic if `rhs` is not normalized when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn reject_from_normalized(self, rhs: Self) -> Self {
        self - self.project_onto_normalized(rhs)
    }

    /// Returns a vector containing the nearest integer to a number for each element of `self`.
    /// Rounds half-way cases away from 0.0.
    #[inline]
    #[must_use]
    pub fn round(self) -> Self {
        Self(unsafe { m128_round(self.0) })
    }

    /// Returns a vector containing the largest integer less than or equal to a number for
    /// each element of `self`.
    #[inline]
    #[must_use]
    pub fn floor(self) -> Self {
        Self(unsafe { m128_floor(self.0) })
    }

    /// Returns a vector containing the smallest integer greater than or equal to a number
    /// for each element of `self`.
    #[inline]
    #[must_use]
    pub fn ceil(self) -> Self {
        Self(unsafe { m128_ceil(self.0) })
    }

    /// Returns a vector containing the integer part of each element of `self`. This means
    /// numbers are always truncated towards zero.
    #[inline]
    #[must_use]
    pub fn trunc(self) -> Self {
        Self(unsafe { m128_trunc(self.0) })
    }

    /// Returns a vector containing the fractional part of the vector, e.g.
    /// `self - self.floor()`.
    ///
    /// Note that this is fast but not precise for large numbers.
    #[inline]
    #[must_use]
    pub fn fract(self) -> Self {
        self - self.floor()
    }

    /// Returns a vector containing `e^self` (the exponential function) for each element of
    /// `self`.
    #[inline]
    #[must_use]
    pub fn exp(self) -> Self {
        Self::new(
            math::exp(self.x),
            math::exp(self.y),
            math::exp(self.z),
            math::exp(self.w),
        )
    }

    /// Returns a vector containing each element of `self` raised to the power of `n`.
    #[inline]
    #[must_use]
    pub fn powf(self, n: f32) -> Self {
        Self::new(
            math::powf(self.x, n),
            math::powf(self.y, n),
            math::powf(self.z, n),
            math::powf(self.w, n),
        )
    }

    /// Returns a vector containing the reciprocal `1.0/n` of each element of `self`.
    #[inline]
    #[must_use]
    pub fn recip(self) -> Self {
        Self(unsafe { _mm_div_ps(Self::ONE.0, self.0) })
    }

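    /// Performs a linear interpolation between `self` and `rhs` based on the value `s`.
    ///
    /// When `s` is `0.0`, the result will be equal to `self`. When `s` is `1.0`, the result
    /// will be equal to `rhs`. When `s` is outside of the range `[0, 1]`, the result is
    /// linearly extrapolated.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::Vec4;
    /// let a = Vec4::ZERO;
    /// let b = Vec4::splat(10.0);
    /// assert_eq!(a.lerp(b, 0.5), Vec4::splat(5.0));
    /// ```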
    #[doc(alias = "mix")]
    #[inline]
    #[must_use]
    pub fn lerp(self, rhs: Self, s: f32) -> Self {
        self + ((rhs - self) * s)
    }

    /// Returns true if the absolute difference of all elements between `self` and `rhs` is
    /// less than or equal to `max_abs_diff`.
    ///
    /// This can be used to compare if two vectors contain similar elements. It works best
    /// when comparing with a known value. The `max_abs_diff` that should be used depends
    /// on the values being compared against.
    #[inline]
    #[must_use]
    pub fn abs_diff_eq(self, rhs: Self, max_abs_diff: f32) -> bool {
        self.sub(rhs).abs().cmple(Self::splat(max_abs_diff)).all()
    }

    /// Returns a vector with a length no less than `min` and no more than `max`.
    ///
    /// # Panics
    ///
    /// Will panic if `min` is greater than `max` when `glam_assert` is enabled.
    #[inline]
    #[must_use]
    pub fn clamp_length(self, min: f32, max: f32) -> Self {
        glam_assert!(min <= max);
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no more than `max`.
    #[inline]
    #[must_use]
    pub fn clamp_length_max(self, max: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq > max * max {
            max * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

    /// Returns a vector with a length no less than `min`.
    #[inline]
    #[must_use]
    pub fn clamp_length_min(self, min: f32) -> Self {
        let length_sq = self.length_squared();
        if length_sq < min * min {
            min * (self / math::sqrt(length_sq))
        } else {
            self
        }
    }

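    /// Fused multiply-add. Computes `(self * a) + b` element-wise with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if the target
    /// architecture has a dedicated fma CPU instruction. However, this is not always true,
    /// and will be heavily dependent on designing algorithms with specific target hardware
    /// in mind.
    ///
    /// # Example
    ///
    /// ```
    /// # use glam::Vec4;
    /// let v = Vec4::splat(2.0);
    /// assert_eq!(v.mul_add(Vec4::splat(3.0), Vec4::splat(1.0)), Vec4::splat(7.0));
    /// ```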
    #[inline]
    #[must_use]
    pub fn mul_add(self, a: Self, b: Self) -> Self {
        #[cfg(target_feature = "fma")]
        unsafe {
            Self(_mm_fmadd_ps(self.0, a.0, b.0))
        }
        #[cfg(not(target_feature = "fma"))]
        Self::new(
            math::mul_add(self.x, a.x, b.x),
            math::mul_add(self.y, a.y, b.y),
            math::mul_add(self.z, a.z, b.z),
            math::mul_add(self.w, a.w, b.w),
        )
    }

    /// Casts all elements of `self` to `f64`.
    #[inline]
    #[must_use]
    pub fn as_dvec4(&self) -> crate::DVec4 {
        crate::DVec4::new(self.x as f64, self.y as f64, self.z as f64, self.w as f64)
    }

    /// Casts all elements of `self` to `i16`.
    #[inline]
    #[must_use]
    pub fn as_i16vec4(&self) -> crate::I16Vec4 {
        crate::I16Vec4::new(self.x as i16, self.y as i16, self.z as i16, self.w as i16)
    }

    /// Casts all elements of `self` to `u16`.
    #[inline]
    #[must_use]
    pub fn as_u16vec4(&self) -> crate::U16Vec4 {
        crate::U16Vec4::new(self.x as u16, self.y as u16, self.z as u16, self.w as u16)
    }

    /// Casts all elements of `self` to `i32`.
    #[inline]
    #[must_use]
    pub fn as_ivec4(&self) -> crate::IVec4 {
        crate::IVec4::new(self.x as i32, self.y as i32, self.z as i32, self.w as i32)
    }

    /// Casts all elements of `self` to `u32`.
    #[inline]
    #[must_use]
    pub fn as_uvec4(&self) -> crate::UVec4 {
        crate::UVec4::new(self.x as u32, self.y as u32, self.z as u32, self.w as u32)
    }

    /// Casts all elements of `self` to `i64`.
    #[inline]
    #[must_use]
    pub fn as_i64vec4(&self) -> crate::I64Vec4 {
        crate::I64Vec4::new(self.x as i64, self.y as i64, self.z as i64, self.w as i64)
    }

    /// Casts all elements of `self` to `u64`.
    #[inline]
    #[must_use]
    pub fn as_u64vec4(&self) -> crate::U64Vec4 {
        crate::U64Vec4::new(self.x as u64, self.y as u64, self.z as u64, self.w as u64)
    }
}

impl Default for Vec4 {
    #[inline(always)]
    fn default() -> Self {
        Self::ZERO
    }
}

impl PartialEq for Vec4 {
    #[inline]
    fn eq(&self, rhs: &Self) -> bool {
        self.cmpeq(*rhs).all()
    }
}

impl Div<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: Self) -> Self {
        Self(unsafe { _mm_div_ps(self.0, rhs.0) })
    }
}

impl DivAssign<Vec4> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_div_ps(self.0, rhs.0) };
    }
}

impl Div<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn div(self, rhs: f32) -> Self {
        Self(unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl DivAssign<f32> for Vec4 {
    #[inline]
    fn div_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_div_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Div<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn div(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_div_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Mul<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: Self) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, rhs.0) })
    }
}

impl MulAssign<Vec4> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_mul_ps(self.0, rhs.0) };
    }
}

impl Mul<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn mul(self, rhs: f32) -> Self {
        Self(unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl MulAssign<f32> for Vec4 {
    #[inline]
    fn mul_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_mul_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Mul<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn mul(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_mul_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Add<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: Self) -> Self {
        Self(unsafe { _mm_add_ps(self.0, rhs.0) })
    }
}

impl AddAssign<Vec4> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: Self) {
        self.0 = unsafe { _mm_add_ps(self.0, rhs.0) };
    }
}

impl Add<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn add(self, rhs: f32) -> Self {
        Self(unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl AddAssign<f32> for Vec4 {
    #[inline]
    fn add_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_add_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Add<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn add(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_add_ps(_mm_set1_ps(self), rhs.0) })
    }
}

impl Sub<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: Self) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, rhs.0) })
    }
}

impl SubAssign<Vec4> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: Vec4) {
        self.0 = unsafe { _mm_sub_ps(self.0, rhs.0) };
    }
}

impl Sub<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: f32) -> Self {
        Self(unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) })
    }
}

impl SubAssign<f32> for Vec4 {
    #[inline]
    fn sub_assign(&mut self, rhs: f32) {
        self.0 = unsafe { _mm_sub_ps(self.0, _mm_set1_ps(rhs)) };
    }
}

impl Sub<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn sub(self, rhs: Vec4) -> Vec4 {
        Vec4(unsafe { _mm_sub_ps(_mm_set1_ps(self), rhs.0) })
    }
}

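// The `Rem` implementations below compute a floored remainder,
// `self - rhs * (self / rhs).floor()`, so each element of the result takes the
// sign of `rhs` rather than the sign of `self`.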
impl Rem<Vec4> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: Self) -> Self {
        unsafe {
            let n = m128_floor(_mm_div_ps(self.0, rhs.0));
            Self(_mm_sub_ps(self.0, _mm_mul_ps(n, rhs.0)))
        }
    }
}

impl RemAssign<Vec4> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: Self) {
        *self = self.rem(rhs);
    }
}

impl Rem<f32> for Vec4 {
    type Output = Self;
    #[inline]
    fn rem(self, rhs: f32) -> Self {
        self.rem(Self::splat(rhs))
    }
}

impl RemAssign<f32> for Vec4 {
    #[inline]
    fn rem_assign(&mut self, rhs: f32) {
        *self = self.rem(Self::splat(rhs));
    }
}

impl Rem<Vec4> for f32 {
    type Output = Vec4;
    #[inline]
    fn rem(self, rhs: Vec4) -> Vec4 {
        Vec4::splat(self).rem(rhs)
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsRef<[f32; 4]> for Vec4 {
    #[inline]
    fn as_ref(&self) -> &[f32; 4] {
        unsafe { &*(self as *const Vec4 as *const [f32; 4]) }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl AsMut<[f32; 4]> for Vec4 {
    #[inline]
    fn as_mut(&mut self) -> &mut [f32; 4] {
        unsafe { &mut *(self as *mut Vec4 as *mut [f32; 4]) }
    }
}

impl Sum for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ZERO, Self::add)
    }
}

impl<'a> Sum<&'a Self> for Vec4 {
    #[inline]
    fn sum<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ZERO, |a, &b| Self::add(a, b))
    }
}

impl Product for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = Self>,
    {
        iter.fold(Self::ONE, Self::mul)
    }
}

impl<'a> Product<&'a Self> for Vec4 {
    #[inline]
    fn product<I>(iter: I) -> Self
    where
        I: Iterator<Item = &'a Self>,
    {
        iter.fold(Self::ONE, |a, &b| Self::mul(a, b))
    }
}

impl Neg for Vec4 {
    type Output = Self;
    #[inline]
    fn neg(self) -> Self {
        // Negate all four lanes at once by flipping each sign bit.
        Self(unsafe { _mm_xor_ps(_mm_set1_ps(-0.0), self.0) })
    }
}

impl Index<usize> for Vec4 {
    type Output = f32;
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        match index {
            0 => &self.x,
            1 => &self.y,
            2 => &self.z,
            3 => &self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

impl IndexMut<usize> for Vec4 {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        match index {
            0 => &mut self.x,
            1 => &mut self.y,
            2 => &mut self.z,
            3 => &mut self.w,
            _ => panic!("index out of bounds"),
        }
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Display for Vec4 {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{}, {}, {}, {}]", self.x, self.y, self.z, self.w)
    }
}

#[cfg(not(target_arch = "spirv"))]
impl fmt::Debug for Vec4 {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt.debug_tuple(stringify!(Vec4))
            .field(&self.x)
            .field(&self.y)
            .field(&self.z)
            .field(&self.w)
            .finish()
    }
}

impl From<Vec4> for __m128 {
    #[inline]
    fn from(t: Vec4) -> Self {
        t.0
    }
}

impl From<__m128> for Vec4 {
    #[inline]
    fn from(t: __m128) -> Self {
        Self(t)
    }
}

impl From<[f32; 4]> for Vec4 {
    #[inline]
    fn from(a: [f32; 4]) -> Self {
        Self(unsafe { _mm_loadu_ps(a.as_ptr()) })
    }
}

impl From<Vec4> for [f32; 4] {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // Write through a 16-byte aligned temporary so the aligned store is valid.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(f32, f32, f32, f32)> for Vec4 {
    #[inline]
    fn from(t: (f32, f32, f32, f32)) -> Self {
        Self::new(t.0, t.1, t.2, t.3)
    }
}

impl From<Vec4> for (f32, f32, f32, f32) {
    #[inline]
    fn from(v: Vec4) -> Self {
        use crate::Align16;
        use core::mem::MaybeUninit;
        // Write through a 16-byte aligned temporary so the aligned store is valid.
        let mut out: MaybeUninit<Align16<Self>> = MaybeUninit::uninit();
        unsafe {
            _mm_store_ps(out.as_mut_ptr().cast(), v.0);
            out.assume_init().0
        }
    }
}

impl From<(Vec3A, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3A, f32)) -> Self {
        v.extend(w)
    }
}

impl From<(f32, Vec3A)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3A)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec3, f32)> for Vec4 {
    #[inline]
    fn from((v, w): (Vec3, f32)) -> Self {
        Self::new(v.x, v.y, v.z, w)
    }
}

impl From<(f32, Vec3)> for Vec4 {
    #[inline]
    fn from((x, v): (f32, Vec3)) -> Self {
        Self::new(x, v.x, v.y, v.z)
    }
}

impl From<(Vec2, f32, f32)> for Vec4 {
    #[inline]
    fn from((v, z, w): (Vec2, f32, f32)) -> Self {
        Self::new(v.x, v.y, z, w)
    }
}

impl From<(Vec2, Vec2)> for Vec4 {
    #[inline]
    fn from((v, u): (Vec2, Vec2)) -> Self {
        Self::new(v.x, v.y, u.x, u.y)
    }
}

// Dereferencing to a plain `x, y, z, w` struct is what makes field access like
// `v.x` work on the SIMD-backed `Vec4`.
impl Deref for Vec4 {
    type Target = crate::deref::Vec4<f32>;
    #[inline]
    fn deref(&self) -> &Self::Target {
        unsafe { &*(self as *const Self).cast() }
    }
}

impl DerefMut for Vec4 {
    #[inline]
    fn deref_mut(&mut self) -> &mut Self::Target {
        unsafe { &mut *(self as *mut Self).cast() }
    }
}