#ifdef BT_USE_DOUBLE_PRECISION
#define btVector3Data btVector3DoubleData
#define btVector3DataName "btVector3DoubleData"
#else
#define btVector3Data btVector3FloatData
#define btVector3DataName "btVector3FloatData"
#endif //BT_USE_DOUBLE_PRECISION

#if defined BT_USE_SSE

#pragma warning(disable: 4556) // value of intrinsic immediate argument '4294967239' is out of range '0 - 255'

#define BT_SHUFFLE(x,y,z,w) ((w)<<6 | (z)<<4 | (y)<<2 | (x))
#define bt_pshufd_ps( _a, _mask ) _mm_shuffle_ps((_a), (_a), (_mask) )
#define bt_splat3_ps( _a, _i ) bt_pshufd_ps((_a), BT_SHUFFLE(_i,_i,_i, 3) )
#define bt_splat_ps( _a, _i ) bt_pshufd_ps((_a), BT_SHUFFLE(_i,_i,_i,_i) )

#define btv3AbsiMask (_mm_set_epi32(0x00000000, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
#define btvAbsMask (_mm_set_epi32( 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF))
#define btvFFF0Mask (_mm_set_epi32(0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF))
#define btv3AbsfMask btCastiTo128f(btv3AbsiMask)
#define btvFFF0fMask btCastiTo128f(btvFFF0Mask)
#define btvxyzMaskf btvFFF0fMask
#define btvAbsfMask btCastiTo128f(btvAbsMask)

#define btvMzeroMask (_mm_set_ps(-0.0f, -0.0f, -0.0f, -0.0f))
#define v1110 (_mm_set_ps(0.0f, 1.0f, 1.0f, 1.0f))
#define vHalf (_mm_set_ps(0.5f, 0.5f, 0.5f, 0.5f))
#define v1_5 (_mm_set_ps(1.5f, 1.5f, 1.5f, 1.5f))

#elif defined(BT_USE_NEON)

const float32x4_t ATTRIBUTE_ALIGNED16(btvMzeroMask) = (float32x4_t){-0.0f, -0.0f, -0.0f, -0.0f};
const int32x4_t ATTRIBUTE_ALIGNED16(btvFFF0Mask) = (int32x4_t){static_cast<int32_t>(0xFFFFFFFF),
    static_cast<int32_t>(0xFFFFFFFF), static_cast<int32_t>(0xFFFFFFFF), 0x0};
const int32x4_t ATTRIBUTE_ALIGNED16(btvAbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
const int32x4_t ATTRIBUTE_ALIGNED16(btv3AbsMask) = (int32x4_t){0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x0};
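The shuffle helpers above are what the SIMD paths below rely on: BT_SHUFFLE packs four 2-bit lane selectors into an _mm_shuffle_ps immediate, with the x selector in the lowest bits (the reverse of _MM_SHUFFLE's argument order), so bt_splat_ps(a, i) broadcasts lane i across the register. A minimal stand-alone sketch of that behaviour, assuming only an SSE-capable compiler (the macro bodies are copied from the listing above):

#include <xmmintrin.h>
#include <cstdio>

#define BT_SHUFFLE(x, y, z, w) ((w) << 6 | (z) << 4 | (y) << 2 | (x))
#define bt_pshufd_ps(_a, _mask) _mm_shuffle_ps((_a), (_a), (_mask))
#define bt_splat_ps(_a, _i) bt_pshufd_ps((_a), BT_SHUFFLE(_i, _i, _i, _i))

int main()
{
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  // lanes: x=1, y=2, z=3, w=4
    __m128 s = bt_splat_ps(a, 2);                   // broadcast lane 2 (z == 3.0f)
    float out[4];
    _mm_storeu_ps(out, s);
    printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);  // prints 3 3 3 3
    return 0;
}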
#if defined (__SPU__) && defined (__CELLOS_LV2__)
    // get128() on SPU/PS3 reinterprets the scalar storage as a Cell vector
    return *((const vec_float4*)&m_floats[0]);
#else //__CELLOS_LV2__ __SPU__
#if defined (BT_USE_SSE) || defined(BT_USE_NEON) // _WIN32 || ARM
    btSimdFloat4 mVec128;  // 128-bit SIMD storage, unioned with btScalar m_floats[4]
#endif //__CELLOS_LV2__ __SPU__

#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE) )|| defined (BT_USE_NEON)
    // SIMD copy constructor: copy the whole 128-bit register
    mVec128 = rhs.mVec128;
#endif // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)

    // operator+=(const btVector3& v): add a vector to this one
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    mVec128 = _mm_add_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
    mVec128 = vaddq_f32(mVec128, v.mVec128);

    // operator-=(const btVector3& v): subtract a vector from this one
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    mVec128 = _mm_sub_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
    mVec128 = vsubq_f32(mVec128, v.mVec128);
    // operator*=(const btScalar& s): scale the vector
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    __m128 vs = _mm_load_ss(&s);    // (S 0 0 0)
    vs = bt_pshufd_ps(vs, 0x80);    // (S S S 0.0)
    mVec128 = _mm_mul_ps(mVec128, vs);
#elif defined(BT_USE_NEON)
    mVec128 = vmulq_n_f32(mVec128, s);
    // operator/=(const btScalar& s): inversely scale the vector (SSE path currently disabled)
#if 0 //defined(BT_USE_SSE_IN_API)
    __m128 vs = _mm_load_ss(&s);    // (S 0 0 0)
    vs = _mm_div_ss(v1110, vs);     // (1.0/S 1 1 0)
    vs = bt_pshufd_ps(vs, 0x00);    // (1.0/S 1.0/S 1.0/S 1.0/S)
    mVec128 = _mm_mul_ps(mVec128, vs);
    // dot(const btVector3& v): return the dot product
#if defined BT_USE_SIMD_VECTOR3 && defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    __m128 vd = _mm_mul_ps(mVec128, v.mVec128);
    __m128 z = _mm_movehl_ps(vd, vd);
    __m128 y = _mm_shuffle_ps(vd, vd, 0x55);
    vd = _mm_add_ss(vd, y);
    vd = _mm_add_ss(vd, z);
    return _mm_cvtss_f32(vd);
#elif defined(BT_USE_NEON)
    float32x4_t vd = vmulq_f32(mVec128, v.mVec128);
    float32x2_t x = vpadd_f32(vget_low_f32(vd), vget_low_f32(vd));
    x = vadd_f32(x, vget_high_f32(vd));
    return vget_lane_f32(x, 0);
#else
    return m_floats[0] * v.m_floats[0] +
           m_floats[1] * v.m_floats[1] +
           m_floats[2] * v.m_floats[2];
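The SSE branch of dot() avoids a horizontal-add instruction: _mm_shuffle_ps(vd, vd, 0x55) broadcasts the y product into lane 0, _mm_movehl_ps(vd, vd) moves the z product into lane 0 of a second register, and two _mm_add_ss calls accumulate x + y + z in the low lane. A stand-alone check of that pattern (dot3_sse is a hypothetical helper, not part of Bullet):

#include <xmmintrin.h>
#include <cassert>
#include <cmath>

// Hypothetical helper mirroring the shuffle/movehl pattern used in the SSE dot() above.
static float dot3_sse(__m128 a, __m128 b)
{
    __m128 vd = _mm_mul_ps(a, b);             // (ax*bx, ay*by, az*bz, aw*bw)
    __m128 z = _mm_movehl_ps(vd, vd);         // lane 0 now holds az*bz
    __m128 y = _mm_shuffle_ps(vd, vd, 0x55);  // lane 0 now holds ay*by
    vd = _mm_add_ss(vd, y);                   // x + y
    vd = _mm_add_ss(vd, z);                   // x + y + z
    return _mm_cvtss_f32(vd);
}

int main()
{
    __m128 a = _mm_set_ps(0.0f, 3.0f, 2.0f, 1.0f);  // (1, 2, 3, w = 0)
    __m128 b = _mm_set_ps(0.0f, 6.0f, 5.0f, 4.0f);  // (4, 5, 6, w = 0)
    assert(std::fabs(dot3_sse(a, b) - 32.0f) < 1e-6f);  // 1*4 + 2*5 + 3*6 = 32
    return 0;
}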
    // safeNormalize(): divide by the largest absolute component first to avoid overflow/underflow
    int maxIndex = absVec.maxAxis();
    if (absVec[maxIndex] > 0)
    {
        *this /= absVec[maxIndex];
        return *this /= length();
    }
    // normalize(): scale this vector so that x^2 + y^2 + z^2 = 1
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    // dot product first
    __m128 vd = _mm_mul_ps(mVec128, mVec128);
    __m128 z = _mm_movehl_ps(vd, vd);
    __m128 y = _mm_shuffle_ps(vd, vd, 0x55);
    vd = _mm_add_ss(vd, y);
    vd = _mm_add_ss(vd, z);

#if 0
    vd = _mm_sqrt_ss(vd);
    vd = _mm_div_ss(v1110, vd);
    vd = bt_splat_ps(vd, 0x80);
    mVec128 = _mm_mul_ps(mVec128, vd);
#else
    // NR step 1/sqrt(x) - vd is x, y is output
    y = _mm_rsqrt_ss(vd);  // estimate

    // one step NR
    z = v1_5;
    vd = _mm_mul_ss(vd, vHalf);  // vd * 0.5
    vd = _mm_mul_ss(vd, y);      // vd * 0.5 * y0
    vd = _mm_mul_ss(vd, y);      // vd * 0.5 * y0 * y0
    z = _mm_sub_ss(z, vd);       // 1.5 - vd * 0.5 * y0 * y0

    y = _mm_mul_ss(y, z);        // y0 * (1.5 - vd * 0.5 * y0 * y0)

    y = bt_splat_ps(y, 0x80);    // splat the refined inverse length
    mVec128 = _mm_mul_ps(mVec128, y);
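The normalize() path above refines the _mm_rsqrt_ss estimate with one Newton-Raphson step: for d = length2() and an estimate y0 of 1/sqrt(d), the update is y1 = y0 * (1.5 - 0.5 * d * y0 * y0), which is then splatted and multiplied into all components. A scalar model of that refinement (refined_rsqrt is a hypothetical helper, not part of Bullet):

#include <cmath>
#include <cstdio>

// Hypothetical scalar model of the SSE code above: one Newton-Raphson step
// applied to a rough estimate y0 of 1/sqrt(d).
static float refined_rsqrt(float d, float y0)
{
    return y0 * (1.5f - 0.5f * d * y0 * y0);
}

int main()
{
    float d = 9.0f;
    float y0 = 0.3f;                  // rough guess for 1/sqrt(9) = 0.3333...
    float y1 = refined_rsqrt(d, y0);  // one NR step brings it to ~0.3285
    printf("estimate %f, refined %f, exact %f\n", y0, y1, 1.0f / std::sqrt(d));
    return 0;
}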
    // absolute(): return a vector with the absolute values of each element
#if defined BT_USE_SIMD_VECTOR3 && defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    return btVector3(_mm_and_ps(mVec128, btv3AbsfMask));
#elif defined(BT_USE_NEON)

    // cross(const btVector3& v): return the cross product between this and another vector
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    __m128 T, V;

    T = bt_pshufd_ps(mVec128, BT_SHUFFLE(1, 2, 0, 3));    // (Y Z X 0)
    V = bt_pshufd_ps(v.mVec128, BT_SHUFFLE(1, 2, 0, 3));  // (Y Z X 0)

    V = _mm_mul_ps(V, mVec128);
    T = _mm_mul_ps(T, v.mVec128);
    V = _mm_sub_ps(V, T);

    V = bt_pshufd_ps(V, BT_SHUFFLE(1, 2, 0, 3));
    return btVector3(V);
#elif defined(BT_USE_NEON)
    float32x4_t T, V;
    // form (Y, Z, X, _) of mVec128 and v.mVec128
    float32x2_t Tlow = vget_low_f32(mVec128);
    float32x2_t Vlow = vget_low_f32(v.mVec128);
    T = vcombine_f32(vext_f32(Tlow, vget_high_f32(mVec128), 1), Tlow);
    V = vcombine_f32(vext_f32(Vlow, vget_high_f32(v.mVec128), 1), Vlow);

    V = vmulq_f32(V, mVec128);
    T = vmulq_f32(T, v.mVec128);
    V = vsubq_f32(V, T);
    Vlow = vget_low_f32(V);
    // form (Y, Z, X, _)
    V = vcombine_f32(vext_f32(Vlow, vget_high_f32(V), 1), Vlow);
    V = (float32x4_t)vandq_s32((int32x4_t)V, btvFFF0Mask);
    return btVector3(V);
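Both SIMD branches of cross() use the same permutation identity: with yzx(a) = (a.y, a.z, a.x), the cross product equals yzx(yzx(b) * a - yzx(a) * b), evaluated with one multiply, one subtract and three permutes. A scalar sketch of that identity (hypothetical types and helpers, plain C++):

#include <cassert>

struct V3 { float x, y, z; };

// Rotate components left: (x, y, z) -> (y, z, x), the permutation BT_SHUFFLE(1, 2, 0, 3) applies.
static V3 yzx(const V3& a) { return {a.y, a.z, a.x}; }

// Cross product written the way the SIMD code computes it:
// cross(a, b) = yzx( yzx(b) * a - yzx(a) * b ), products taken componentwise.
static V3 cross_via_permutes(const V3& a, const V3& b)
{
    V3 ap = yzx(a), bp = yzx(b);
    V3 d = {bp.x * a.x - ap.x * b.x,
            bp.y * a.y - ap.y * b.y,
            bp.z * a.z - ap.z * b.z};
    return yzx(d);
}

int main()
{
    V3 x = {1, 0, 0}, y = {0, 1, 0};
    V3 r = cross_via_permutes(x, y);  // expect the z axis (0, 0, 1)
    assert(r.x == 0 && r.y == 0 && r.z == 1);
    return 0;
}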
    // triple(const btVector3& v1, const btVector3& v2): return this . (v1 x v2)
#if defined BT_USE_SIMD_VECTOR3 && defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    // cross:
    __m128 T = _mm_shuffle_ps(v1.mVec128, v1.mVec128, BT_SHUFFLE(1, 2, 0, 3));  // (Y Z X 0)
    __m128 V = _mm_shuffle_ps(v2.mVec128, v2.mVec128, BT_SHUFFLE(1, 2, 0, 3));  // (Y Z X 0)

    V = _mm_mul_ps(V, v1.mVec128);
    T = _mm_mul_ps(T, v2.mVec128);
    V = _mm_sub_ps(V, T);

    V = _mm_shuffle_ps(V, V, BT_SHUFFLE(1, 2, 0, 3));

    // dot:
    V = _mm_mul_ps(V, mVec128);
    __m128 z = _mm_movehl_ps(V, V);
    __m128 y = _mm_shuffle_ps(V, V, 0x55);
    V = _mm_add_ss(V, y);
    V = _mm_add_ss(V, z);
    return _mm_cvtss_f32(V);

#elif defined(BT_USE_NEON)
    // cross:
    float32x4_t T, V;
    // form (Y, Z, X, _) of v1 and v2
    float32x2_t Tlow = vget_low_f32(v1.mVec128);
    float32x2_t Vlow = vget_low_f32(v2.mVec128);
    T = vcombine_f32(vext_f32(Tlow, vget_high_f32(v1.mVec128), 1), Tlow);
    V = vcombine_f32(vext_f32(Vlow, vget_high_f32(v2.mVec128), 1), Vlow);

    V = vmulq_f32(V, v1.mVec128);
    T = vmulq_f32(T, v2.mVec128);
    V = vsubq_f32(V, T);
    Vlow = vget_low_f32(V);
    // form (Y, Z, X, _)
    V = vcombine_f32(vext_f32(Vlow, vget_high_f32(V), 1), Vlow);

    // dot:
    V = vmulq_f32(mVec128, V);
    float32x2_t x = vpadd_f32(vget_low_f32(V), vget_low_f32(V));
    x = vadd_f32(x, vget_high_f32(V));
    return vget_lane_f32(x, 0);
    // minAxis(): return the axis with the smallest value (0, 1, 2 for x, y, z)
    return m_floats[0] < m_floats[1] ? (m_floats[0] < m_floats[2] ? 0 : 2) : (m_floats[1] < m_floats[2] ? 1 : 2);

    // maxAxis(): return the axis with the largest value (0, 1, 2 for x, y, z)
    return m_floats[0] < m_floats[1] ? (m_floats[1] < m_floats[2] ? 2 : 1) : (m_floats[0] < m_floats[2] ? 2 : 0);

    // furthestAxis() / closestAxis()
    return absolute().minAxis();
    return absolute().maxAxis();
    // setInterpolate3(v0, v1, rt): this = v0 + (v1 - v0) * rt
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    __m128 vrt = _mm_load_ss(&rt);   // (rt 0 0 0)
    btScalar s = btScalar(1.0) - rt;
    __m128 vs = _mm_load_ss(&s);     // (S 0 0 0)
    vs = bt_pshufd_ps(vs, 0x80);     // (S S S 0.0)
    __m128 r0 = _mm_mul_ps(v0.mVec128, vs);
    vrt = bt_pshufd_ps(vrt, 0x80);   // (rt rt rt 0.0)
    __m128 r1 = _mm_mul_ps(v1.mVec128, vrt);
    __m128 tmp3 = _mm_add_ps(r0, r1);
    mVec128 = tmp3;
#elif defined(BT_USE_NEON)
    float32x4_t vl = vsubq_f32(v1.mVec128, v0.mVec128);
    vl = vmulq_n_f32(vl, rt);
    mVec128 = vaddq_f32(vl, v0.mVec128);
    // lerp(const btVector3& v, const btScalar& t): linear interpolation between this and v
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    __m128 vt = _mm_load_ss(&t);  // (t 0 0 0)
    vt = bt_pshufd_ps(vt, 0x80);  // (t t t 0.0)
    __m128 vl = _mm_sub_ps(v.mVec128, mVec128);
    vl = _mm_mul_ps(vl, vt);
    vl = _mm_add_ps(vl, mVec128);
    return btVector3(vl);
#elif defined(BT_USE_NEON)
    float32x4_t vl = vsubq_f32(v.mVec128, mVec128);
    vl = vmulq_n_f32(vl, t);
    vl = vaddq_f32(vl, mVec128);
    return btVector3(vl);
#else
    return btVector3(m_floats[0] + (v.m_floats[0] - m_floats[0]) * t,
                     m_floats[1] + (v.m_floats[1] - m_floats[1]) * t,
                     m_floats[2] + (v.m_floats[2] - m_floats[2]) * t);
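lerp() and setInterpolate3() both evaluate v0 + (v1 - v0) * t, which returns v0 at t = 0 and v1 at t = 1. A small usage sketch, assuming the Bullet headers are on the include path (the values in the comments follow directly from the formula):

#include "btVector3.h"  // LinearMath/btVector3.h in the Bullet source tree

int main()
{
    btVector3 a(0.0f, 0.0f, 0.0f);
    btVector3 b(10.0f, 2.0f, -4.0f);

    btVector3 mid = a.lerp(b, btScalar(0.5));  // (5, 1, -2): halfway between a and b

    btVector3 c;
    c.setInterpolate3(a, b, btScalar(0.25));   // same formula, stored in place: (2.5, 0.5, -1)

    (void)mid;
    (void)c;
    return 0;
}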
    // operator*=(const btVector3& v): elementwise multiply this vector by the other
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    mVec128 = _mm_mul_ps(mVec128, v.mVec128);
#elif defined(BT_USE_NEON)
    mVec128 = vmulq_f32(mVec128, v.mVec128);

    // operator==(const btVector3& other)
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    return (0xf == _mm_movemask_ps((__m128)_mm_cmpeq_ps(mVec128, other.mVec128)));
#else
    return ((m_floats[3] == other.m_floats[3]) &&
            (m_floats[2] == other.m_floats[2]) &&
            (m_floats[1] == other.m_floats[1]) &&
            (m_floats[0] == other.m_floats[0]));

    // operator!=(const btVector3& other)
    return !(*this == other);

    // setMax(const btVector3& other): take the elementwise maximum
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    mVec128 = _mm_max_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
    mVec128 = vmaxq_f32(mVec128, other.mVec128);

    // setMin(const btVector3& other): take the elementwise minimum
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    mVec128 = _mm_min_ps(mVec128, other.mVec128);
#elif defined(BT_USE_NEON)
    mVec128 = vminq_f32(mVec128, other.mVec128);
    // getSkewSymmetricMatrix(v0, v1, v2): build the rows of the cross-product (skew-symmetric) matrix of this vector
#if defined BT_USE_SIMD_VECTOR3 && defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    __m128 V = _mm_and_ps(mVec128, btvFFF0fMask);  // (x  y  z  0)
    __m128 V0 = _mm_xor_ps(btvMzeroMask, V);       // (-x -y -z 0), sign bits flipped
    __m128 V2 = _mm_movelh_ps(V0, V);
    __m128 V1 = _mm_shuffle_ps(V, V0, 0xCE);
    V0 = _mm_shuffle_ps(V0, V, 0xDB);
    V2 = _mm_shuffle_ps(V2, V, 0xF9);
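The scalar fallback of getSkewSymmetricMatrix() in the full header writes the rows [0, -z, y], [z, 0, -x], [-y, x, 0]; stacked as a matrix S they satisfy S * w = this cross w. A scalar sketch of that property (hypothetical types and helpers, not part of Bullet):

#include <cassert>

struct V3 { float x, y, z; };

// Hypothetical scalar sketch of what getSkewSymmetricMatrix() produces:
// three rows r0, r1, r2 such that, stacked as a matrix, S * w == v cross w.
static void skewRows(const V3& v, V3& r0, V3& r1, V3& r2)
{
    r0 = {0.0f, -v.z, v.y};
    r1 = {v.z, 0.0f, -v.x};
    r2 = {-v.y, v.x, 0.0f};
}

static float dot(const V3& a, const V3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

int main()
{
    V3 v = {1, 2, 3}, w = {4, 5, 6};
    V3 r0, r1, r2;
    skewRows(v, r0, r1, r2);
    // S * w, row by row, should equal v cross w = (-3, 6, -3)
    assert(dot(r0, w) == -3.0f && dot(r1, w) == 6.0f && dot(r2, w) == -3.0f);
    return 0;
}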
    // setZero(): clear all four components
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    mVec128 = (__m128)_mm_xor_ps(mVec128, mVec128);
#elif defined(BT_USE_NEON)
    int32x4_t vi = vdupq_n_s32(0);
    mVec128 = vreinterpretq_f32_s32(vi);
    // dot3(v0, v1, v2): return btVector3(this->dot(v0), this->dot(v1), this->dot(v2))
#if defined BT_USE_SIMD_VECTOR3 && defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    __m128 a0 = _mm_mul_ps( v0.mVec128, this->mVec128 );
    __m128 a1 = _mm_mul_ps( v1.mVec128, this->mVec128 );
    __m128 a2 = _mm_mul_ps( v2.mVec128, this->mVec128 );
    __m128 b0 = _mm_unpacklo_ps( a0, a1 );
    __m128 b1 = _mm_unpackhi_ps( a0, a1 );
    __m128 b2 = _mm_unpacklo_ps( a2, _mm_setzero_ps() );
    __m128 r = _mm_movelh_ps( b0, b2 );
    r = _mm_add_ps( r, _mm_movehl_ps( b2, b0 ));
    a2 = _mm_and_ps( a2, btvxyzMaskf);
    r = _mm_add_ps( r, btCastdTo128f (_mm_move_sd( btCastfTo128d(a2), btCastfTo128d(b1) )));
    return btVector3(r);
#elif defined(BT_USE_NEON)
    static const uint32x4_t xyzMask = (const uint32x4_t){
        static_cast<uint32_t>(-1), static_cast<uint32_t>(-1), static_cast<uint32_t>(-1), 0 };
    float32x4_t a0 = vmulq_f32( v0.mVec128, this->mVec128);
    float32x4_t a1 = vmulq_f32( v1.mVec128, this->mVec128);
    float32x4_t a2 = vmulq_f32( v2.mVec128, this->mVec128);
    float32x2x2_t zLo = vtrn_f32( vget_high_f32(a0), vget_high_f32(a1));
    a2 = (float32x4_t) vandq_u32((uint32x4_t) a2, xyzMask );
    float32x2_t b0 = vadd_f32( vpadd_f32( vget_low_f32(a0), vget_low_f32(a1)), zLo.val[0] );
    float32x2_t b1 = vpadd_f32( vpadd_f32( vget_low_f32(a2), vget_high_f32(a2)), vdup_n_f32(0.0f));
    return btVector3( vcombine_f32(b0, b1) );
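dot3() packs three dot products into one btVector3, which saves two separate horizontal reductions when a vector has to be dotted against three basis vectors at once. A usage sketch, assuming the Bullet headers are on the include path:

#include "btVector3.h"  // LinearMath/btVector3.h in the Bullet source tree

int main()
{
    btVector3 n(1.0f, 2.0f, 3.0f);
    btVector3 e0(1.0f, 0.0f, 0.0f);
    btVector3 e1(0.0f, 1.0f, 0.0f);
    btVector3 e2(0.0f, 0.0f, 1.0f);

    // r = (n.dot(e0), n.dot(e1), n.dot(e2)) = (1, 2, 3)
    btVector3 r = n.dot3(e0, e1, e2);
    (void)r;
    return 0;
}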
    // operator+(v1, v2): return the sum of two vectors
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    return btVector3(_mm_add_ps(v1.mVec128, v2.mVec128));
#elif defined(BT_USE_NEON)
    return btVector3(vaddq_f32(v1.mVec128, v2.mVec128));

    // operator*(v1, v2): return the elementwise product of two vectors
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    return btVector3(_mm_mul_ps(v1.mVec128, v2.mVec128));
#elif defined(BT_USE_NEON)
    return btVector3(vmulq_f32(v1.mVec128, v2.mVec128));

    // operator-(v1, v2): return the difference of two vectors (w cleared to 0)
#if defined BT_USE_SIMD_VECTOR3 && (defined(BT_USE_SSE_IN_API) && defined(BT_USE_SSE))
    __m128 r = _mm_sub_ps(v1.mVec128, v2.mVec128);
    return btVector3(_mm_and_ps(r, btvFFF0fMask));
#elif defined(BT_USE_NEON)
    float32x4_t r = vsubq_f32(v1.mVec128, v2.mVec128);
    return btVector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));

    // operator-(v): return the negative of a vector
#if defined BT_USE_SIMD_VECTOR3 && (defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
    __m128 r = _mm_xor_ps(v.mVec128, btvMzeroMask);
    return btVector3(_mm_and_ps(r, btvFFF0fMask));
#elif defined(BT_USE_NEON)
    return btVector3((btSimdFloat4)veorq_s32((int32x4_t)v.mVec128, (int32x4_t)btvMzeroMask));

    // operator*(v, s): return the vector scaled by s
#if defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    __m128 vs = _mm_load_ss(&s);  // (S 0 0 0)
    vs = bt_pshufd_ps(vs, 0x80);  // (S S S 0.0)
    return btVector3(_mm_mul_ps(v.mVec128, vs));
#elif defined(BT_USE_NEON)
    float32x4_t r = vmulq_n_f32(v.mVec128, s);
    return btVector3((float32x4_t)vandq_s32((int32x4_t)r, btvFFF0Mask));

    // operator/(v, s): return the vector inversely scaled by s (SSE path currently disabled)
#if 0 //defined(BT_USE_SSE_IN_API)
    __m128 vs = _mm_load_ss(&s);  // (S 0 0 0)
    vs = _mm_div_ss(v1110, vs);   // (1.0/S 1 1 0)
    vs = bt_pshufd_ps(vs, 0x00);  // (1.0/S 1.0/S 1.0/S 1.0/S)
    return btVector3(_mm_mul_ps(v.mVec128, vs));

    // operator/(v1, v2): return the elementwise division of two vectors
#if defined BT_USE_SIMD_VECTOR3 && (defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE))
    __m128 vec = _mm_div_ps(v1.mVec128, v2.mVec128);
    vec = _mm_and_ps(vec, btvFFF0fMask);
    return btVector3(vec);
#elif defined(BT_USE_NEON)
    float32x4_t x, y, v, m;
    x = v1.mVec128;
    y = v2.mVec128;
    v = vrecpeq_f32(y);     // initial reciprocal estimate of y
    m = vrecpsq_f32(y, v);  // Newton-Raphson correction factor (2 - y*v)
    v = vmulq_f32(v, m);    // refined 1/y
    m = vrecpsq_f32(y, v);  // second correction factor
    v = vmulq_f32(v, m);    // 1/y to roughly full single precision
    // lerp(v1, v2, t): return the linear interpolation between two vectors
    return v1.lerp(v2, t);

    // distance2(const btVector3& v): squared distance to the point at the end of v
    return (v - *this).length2();

    // distance(const btVector3& v): distance to the point at the end of v
    return (v - *this).length();
    // rotate(wAxis, _angle): return this vector rotated about wAxis (must be unit length) by _angle
#if defined BT_USE_SIMD_VECTOR3 && defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    __m128 O = _mm_mul_ps(wAxis.mVec128, mVec128);
    btScalar ssin = btSin(_angle);
    __m128 C = wAxis.cross( mVec128 ).mVec128;
    O = _mm_and_ps(O, btvFFF0fMask);
    btScalar scos = btCos(_angle);

    __m128 vsin = _mm_load_ss(&ssin);  // (S 0 0 0)
    __m128 vcos = _mm_load_ss(&scos);  // (S 0 0 0)

    __m128 Y = bt_pshufd_ps(O, 0xC9);  // (Y Z X 0)
    __m128 Z = bt_pshufd_ps(O, 0xD2);  // (Z X Y 0)
    O = _mm_add_ps(O, Y);
    vsin = bt_pshufd_ps(vsin, 0x80);   // (S S S 0)
    O = _mm_add_ps(O, Z);
    vcos = bt_pshufd_ps(vcos, 0x80);   // (S S S 0)

    vsin = vsin * C;
    O = O * wAxis.mVec128;             // o = wAxis * dot(wAxis, *this)
    __m128 X = mVec128 - O;

    O = O + vsin;
    vcos = vcos * X;
    O = O + vcos;

    return btVector3(O);
#else
    // scalar fallback: split into the component along the axis and the one orthogonal to it
    btVector3 o = wAxis * wAxis.dot(*this);
    btVector3 _x = *this - o;
    btVector3 _y;

    _y = wAxis.cross( *this );

    return ( o + _x * btCos( _angle ) + _y * btSin( _angle ) );
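The scalar fallback makes the geometry explicit: o is the component of *this along the unit-length axis, _x is the orthogonal remainder, and _y = wAxis cross *this, so o + _x*cos + _y*sin is the Rodrigues rotation formula. A usage sketch, assuming the Bullet headers are on the include path (SIMD_HALF_PI comes from btScalar.h):

#include "btVector3.h"  // LinearMath/btVector3.h in the Bullet source tree
#include <cassert>
#include <cmath>

int main()
{
    btVector3 axis(0.0f, 0.0f, 1.0f);  // must be unit length, as the header notes
    btVector3 v(1.0f, 0.0f, 0.0f);

    // Rotating the x axis by 90 degrees about z should give the y axis.
    btVector3 r = v.rotate(axis, btScalar(SIMD_HALF_PI));
    assert(std::fabs(r.x()) < 1e-6f);
    assert(std::fabs(r.y() - 1.0f) < 1e-6f);
    return 0;
}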
    // maxDot(array, array_count, dotOut): return the index of the maximum dot product between *this and array[]
#if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined (BT_USE_NEON)
#if defined _WIN32 || defined (BT_USE_SSE)
    const long scalar_cutoff = 10;
    long _maxdot_large( const float *array, const float *vec, unsigned long array_count, float *dotOut );
#elif defined BT_USE_NEON
    const long scalar_cutoff = 4;
    extern long (*_maxdot_large)( const float *array, const float *vec, unsigned long array_count, float *dotOut );
#endif
    if( array_count < scalar_cutoff )
#endif
    // scalar path: scan array[i].dot(*this) for the largest value and its index
    for( i = 0; i < array_count; i++ )

#if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined (BT_USE_NEON)
    // large arrays: hand off to the out-of-line SIMD kernel
    return _maxdot_large( (float*) array, (float*) &m_floats[0], array_count, &dotOut );
#endif
    // minDot(array, array_count, dotOut): return the index of the minimum dot product between *this and array[]
#if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined (BT_USE_NEON)
#if defined BT_USE_SSE
    const long scalar_cutoff = 10;
    long _mindot_large( const float *array, const float *vec, unsigned long array_count, float *dotOut );
#elif defined BT_USE_NEON
    const long scalar_cutoff = 4;
    extern long (*_mindot_large)( const float *array, const float *vec, unsigned long array_count, float *dotOut );
#else
#error unhandled arch!
#endif
    if( array_count < scalar_cutoff )
#endif
    // scalar path: scan array[i].dot(*this) for the smallest value and its index
    for( i = 0; i < array_count; i++ )

#if (defined BT_USE_SSE && defined BT_USE_SIMD_VECTOR3 && defined BT_USE_SSE_IN_API) || defined (BT_USE_NEON)
    // large arrays: hand off to the out-of-line SIMD kernel
    return _mindot_large( (float*) array, (float*) &m_floats[0], array_count, &dotOut );
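maxDot() and minDot() are support-point searches: given a direction, they return the index of the vector whose dot product with *this is largest or smallest and write that value to dotOut; small arrays take the scalar loop, larger ones the out-of-line SIMD kernels declared above. A usage sketch, assuming the Bullet headers are on the include path:

#include "btVector3.h"  // LinearMath/btVector3.h in the Bullet source tree

int main()
{
    btVector3 verts[4] = {
        btVector3(0.0f, 0.0f, 0.0f),
        btVector3(1.0f, 0.0f, 0.0f),
        btVector3(0.0f, 2.0f, 0.0f),
        btVector3(-3.0f, 0.0f, 0.0f)};

    btVector3 dir(1.0f, 0.0f, 0.0f);
    btScalar dotOut;

    long best = dir.maxDot(verts, 4, dotOut);   // best == 1, dotOut == 1.0 (furthest along +x)
    long worst = dir.minDot(verts, 4, dotOut);  // worst == 3, dotOut == -3.0
    (void)best;
    (void)worst;
    return 0;
}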
#endif //BT_USE_SIMD_VECTOR3

    // btVector4: SIMD copy constructor and assignment operator
#if (defined (BT_USE_SSE_IN_API) && defined (BT_USE_SSE)) || defined (BT_USE_NEON)
    mVec128 = rhs.mVec128;
    mVec128 = v.mVec128;
#endif // #if defined (BT_USE_SSE_IN_API) || defined (BT_USE_NEON)

    // btVector4::absolute4()
#if defined BT_USE_SIMD_VECTOR3 && defined(BT_USE_SSE_IN_API) && defined (BT_USE_SSE)
    return btVector4(_mm_and_ps(mVec128, btvAbsfMask));
#elif defined(BT_USE_NEON)

    // btVector4::closestAxis4()
    return absolute4().maxAxis4();
    // btSwapScalarEndian(sourceVal, destVal): byte-swap one btScalar
#ifdef BT_USE_DOUBLE_PRECISION
    unsigned char* dest = (unsigned char*) &destVal;
    unsigned char* src = (unsigned char*) &sourceVal;
#else
    unsigned char* dest = (unsigned char*) &destVal;
    unsigned char* src = (unsigned char*) &sourceVal;
#endif //BT_USE_DOUBLE_PRECISION

    // btSwapVector3Endian(sourceVec, destVec): swap all four components
    for (int i = 0; i < 4; i++)

    // btUnSwapVector3Endian(vector): swap in place via a temporary
    for (int i = 0; i < 4; i++)
    vector = swappedVec;
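btSwapScalarEndian simply reverses the byte order of each component so serialized data can cross endianness boundaries; applying it twice restores the original value. A stand-alone sketch of the single-precision case (swapFloatEndian is a hypothetical helper, not part of Bullet):

#include <cassert>
#include <cstring>

// Hypothetical helper mirroring the single-precision branch of btSwapScalarEndian:
// copy the four bytes of the source in reverse order into the destination.
static void swapFloatEndian(const float& sourceVal, float& destVal)
{
    unsigned char* dest = (unsigned char*)&destVal;
    const unsigned char* src = (const unsigned char*)&sourceVal;
    dest[0] = src[3];
    dest[1] = src[2];
    dest[2] = src[1];
    dest[3] = src[0];
}

int main()
{
    float original = 1234.5f, swapped, roundTrip;
    swapFloatEndian(original, swapped);   // byte-reversed representation
    swapFloatEndian(swapped, roundTrip);  // swapping twice restores the value
    assert(std::memcmp(&original, &roundTrip, sizeof(float)) == 0);
    return 0;
}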
    // btPlaneSpace1(n, p, q): build two vectors p and q orthogonal to n (and to each other)
    // if |n.z| > 1/sqrt(2): choose p in the y-z plane
    btScalar a = n[1]*n[1] + n[2]*n[2];
    // else: choose p in the x-y plane
    btScalar a = n[0]*n[0] + n[1]*n[1];
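btPlaneSpace1 picks whichever branch is better conditioned (p in the y-z plane when |n.z| dominates, otherwise in the x-y plane) and completes the basis with q = n cross p. A scalar sketch of the first branch (hypothetical types and helpers; n does not need to be unit length):

#include <cassert>
#include <cmath>

struct V3 { float x, y, z; };

static V3 cross(const V3& a, const V3& b)
{
    return {a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x};
}

static float dot(const V3& a, const V3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; }

// Hypothetical scalar sketch of the "p in the y-z plane" branch of btPlaneSpace1:
// p is perpendicular to n by construction, and q = n x p is perpendicular to both.
static void planeSpace(const V3& n, V3& p, V3& q)
{
    float a = n.y * n.y + n.z * n.z;
    float k = 1.0f / std::sqrt(a);
    p = {0.0f, -n.z * k, n.y * k};
    q = cross(n, p);
}

int main()
{
    V3 n = {0.0f, 0.0f, 2.0f};  // mostly along z, so the y-z branch applies
    V3 p, q;
    planeSpace(n, p, q);
    assert(std::fabs(dot(n, p)) < 1e-6f && std::fabs(dot(n, q)) < 1e-6f);
    return 0;
}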
    // serializeFloat / deSerializeFloat, serializeDouble / deSerializeDouble, serialize / deSerialize:
    // each copies the four components to or from the plain-old-data struct, one loop per function
    for (int i = 0; i < 4; i++)
    for (int i = 0; i < 4; i++)
    for (int i = 0; i < 4; i++)
    for (int i = 0; i < 4; i++)
    for (int i = 0; i < 4; i++)
    for (int i = 0; i < 4; i++)
#endif //BT_VECTOR3_H

btScalar angle(const btVector3 &v) const
Return the angle between this and another vector.
btScalar length(const btQuaternion &q)
Return the length of a quaternion.
bool operator!=(const btVector3 &other) const
btVector3 & operator*=(const btVector3 &v)
Elementwise multiply this vector by the other.
void deSerializeDouble(const struct btVector3DoubleData &dataIn)
void setValue(const btScalar &_x, const btScalar &_y, const btScalar &_z)
btVector3 & operator+=(const btVector3 &v)
Add a vector to this one.
btScalar distance(const btVector3 &v) const
Return the distance between the ends of this and another vector. This is semantically treating the vec...
bool operator==(const btVector3 &other) const
btVector3 operator*(const btVector3 &v1, const btVector3 &v2)
Return the elementwise product of two vectors.
btScalar btAngle(const btVector3 &v1, const btVector3 &v2)
Return the angle between two vectors.
btScalar btSin(btScalar x)
void setZ(btScalar _z)
Set the z value.
void deSerialize(const struct btVector3Data &dataIn)
void btPlaneSpace1(const T &n, T &p, T &q)
btScalar btSqrt(btScalar y)
void serializeDouble(struct btVector3DoubleData &dataOut) const
btVector4(const btScalar &_x, const btScalar &_y, const btScalar &_z, const btScalar &_w)
btVector3 absolute() const
Return a vector with the absolute values of each element.
long maxDot(const btVector3 *array, long array_count, btScalar &dotOut) const
Returns the index of the maximum dot product between this and the vectors in array[].
#define SIMD_FORCE_INLINE
void btSwapScalarEndian(const btScalar &sourceVal, btScalar &destVal)
btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization ...
btScalar distance2(const btVector3 &v) const
Return the distance squared between the ends of this and another vector. This is semantically treating...
btScalar dot(const btVector3 &v) const
Return the dot product.
btVector3 & safeNormalize()
btVector3 lerp(const btVector3 &v, const btScalar &t) const
Return the linear interpolation between this and another vector.
btVector3 & operator/=(const btScalar &s)
Inversely scale the vector.
long minDot(const btVector3 *array, long array_count, btScalar &dotOut) const
Returns the index of the minimum dot product between this and the vectors in array[].
btVector3 & normalize()
Normalize this vector so that x^2 + y^2 + z^2 = 1.
const btScalar & x() const
Return the x value.
const btScalar & getZ() const
Return the z value.
void btSetMin(T &a, const T &b)
btVector3()
No initialization constructor.
btVector3 btCross(const btVector3 &v1, const btVector3 &v2)
Return the cross product of two vectors.
btScalar btDistance(const btVector3 &v1, const btVector3 &v2)
Return the distance between two vectors.
void setX(btScalar _x)
Set the x value.
btVector3 rotate(const btVector3 &wAxis, const btScalar angle) const
Return a rotated version of this vector.
btScalar triple(const btVector3 &v1, const btVector3 &v2) const
const btScalar & w() const
Return the w value.
void setW(btScalar _w)
Set the w value.
btVector3 cross(const btVector3 &v) const
Return the cross product between this and another vector.
void serialize(struct btVector3Data &dataOut) const
const btScalar & getY() const
Return the y value.
void setY(btScalar _y)
Set the y value.
const btScalar & getX() const
Return the x value.
btScalar length() const
Return the length of the vector.
void btUnSwapVector3Endian(btVector3 &vector)
btUnSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization ...
btScalar norm() const
Return the norm (length) of the vector.
void serializeFloat(struct btVector3FloatData &dataOut) const
const btScalar & y() const
Return the y value.
void btSetMax(T &a, const T &b)
btVector3 & operator*=(const btScalar &s)
Scale the vector.
btVector3 can be used to represent 3D points and vectors.
#define ATTRIBUTE_ALIGNED16(a)
btScalar length2() const
Return the length of the vector squared.
btScalar btAcos(btScalar x)
btVector3 normalized() const
Return a normalized version of this vector.
btVector3(const btScalar &_x, const btScalar &_y, const btScalar &_z)
Constructor from scalars.
btVector3 operator+(const btVector3 &v1, const btVector3 &v2)
Return the sum of two vectors (point semantics)
#define BT_DECLARE_ALIGNED_ALLOCATOR()
int minAxis() const
Return the axis with the smallest value. Note: return values are 0, 1, 2 for x, y, or z...
btVector3 dot3(const btVector3 &v0, const btVector3 &v1, const btVector3 &v2) const
btScalar dot(const btQuaternion &q1, const btQuaternion &q2)
Calculate the dot product between two quaternions.
btScalar btDot(const btVector3 &v1, const btVector3 &v2)
Return the dot product between two vectors.
void setMax(const btVector3 &other)
Set each element to the max of the current values and the values of another btVector3.
void deSerializeFloat(const struct btVector3FloatData &dataIn)
btScalar btDistance2(const btVector3 &v1, const btVector3 &v2)
Return the distance squared between two vectors.
btVector3 operator/(const btVector3 &v, const btScalar &s)
Return the vector inversely scaled by s.
btVector3 operator-(const btVector3 &v1, const btVector3 &v2)
Return the difference between two vectors.
void setInterpolate3(const btVector3 &v0, const btVector3 &v1, btScalar rt)
btScalar btTriple(const btVector3 &v1, const btVector3 &v2, const btVector3 &v3)
btVector3 & operator-=(const btVector3 &v)
Subtract a vector from this one.
void btSwapVector3Endian(const btVector3 &sourceVec, btVector3 &destVec)
btSwapVector3Endian swaps vector endianness, useful for network and cross-platform serialization ...
btVector3 lerp(const btVector3 &v1, const btVector3 &v2, const btScalar &t)
Return the linear interpolation between two vectors.
void getSkewSymmetricMatrix(btVector3 *v0, btVector3 *v1, btVector3 *v2) const
void setMin(const btVector3 &other)
Set each element to the min of the current values and the values of another btVector3.
float btScalar
The btScalar type abstracts floating point numbers, to easily switch between double and single floati...
btScalar btCos(btScalar x)
btVector4 absolute4() const
int maxAxis() const
Return the axis with the largest value. Note: return values are 0, 1, 2 for x, y, or z.
void setValue(const btScalar &_x, const btScalar &_y, const btScalar &_z, const btScalar &_w)
Set x,y,z and zero w.
btScalar btFabs(btScalar x)
const btScalar & z() const
Return the z value.