/// @ref core

#if GLM_ARCH & GLM_ARCH_SSE2_BIT

namespace glm{
namespace detail
{
/*
    template<qualifier Q>
    struct compute_quat_mul<float, Q, true>
    {
        static qua<float, Q> call(qua<float, Q> const& q1, qua<float, Q> const& q2)
        {
            // SSE2 STATS: 11 shuffle, 8 mul, 8 add
            // SSE4 STATS: 3 shuffle, 4 mul, 4 dpps
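
            // Each output component of the Hamilton product is a signed 4-wide
            // dot product, e.g. for the x component:
            //   x = q1.x * q2.w + q1.y * q2.z - q1.z * q2.y + q1.w * q2.x
            // mul0..mul3 hold q1 times the four swizzles of q2. The SSE4 path
            // folds each signed sum into one _mm_dp_ps (mask 0xff: multiply all
            // four lanes, broadcast the sum to every lane); the SSE2 path builds
            // the same sums from movehl/shuffle horizontal adds.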
            __m128 const mul0 = _mm_mul_ps(q1.data, _mm_shuffle_ps(q2.data, q2.data, _MM_SHUFFLE(0, 1, 2, 3)));
            __m128 const mul1 = _mm_mul_ps(q1.data, _mm_shuffle_ps(q2.data, q2.data, _MM_SHUFFLE(1, 0, 3, 2)));
            __m128 const mul2 = _mm_mul_ps(q1.data, _mm_shuffle_ps(q2.data, q2.data, _MM_SHUFFLE(2, 3, 0, 1)));
            __m128 const mul3 = _mm_mul_ps(q1.data, q2.data);

#           if GLM_ARCH & GLM_ARCH_SSE41_BIT
            __m128 const add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff);
            __m128 const add1 = _mm_dp_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f), 0xff);
            __m128 const add2 = _mm_dp_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f), 0xff);
            __m128 const add3 = _mm_dp_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f), 0xff);
#           else
            // Apply the sign mask, fold the high half onto the low half, then
            // add the two remaining lanes to finish each horizontal sum.
            __m128 const mul4 = _mm_mul_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f));
            __m128 const add0 = _mm_add_ps(mul4, _mm_movehl_ps(mul4, mul4));
            __m128 const add4 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1));

            __m128 const mul5 = _mm_mul_ps(mul1, _mm_set_ps(1.0f, 1.0f, 1.0f, -1.0f));
            __m128 const add1 = _mm_add_ps(mul5, _mm_movehl_ps(mul5, mul5));
            __m128 const add5 = _mm_add_ss(add1, _mm_shuffle_ps(add1, add1, 1));

            __m128 const mul6 = _mm_mul_ps(mul2, _mm_set_ps(1.0f, 1.0f, -1.0f, 1.0f));
            __m128 const add2 = _mm_add_ps(mul6, _mm_movehl_ps(mul6, mul6));
            __m128 const add6 = _mm_add_ss(add2, _mm_shuffle_ps(add2, add2, 1));

            __m128 const mul7 = _mm_mul_ps(mul3, _mm_set_ps(1.0f, -1.0f, -1.0f, -1.0f));
            __m128 const add3 = _mm_add_ps(mul7, _mm_movehl_ps(mul7, mul7));
            __m128 const add7 = _mm_add_ss(add3, _mm_shuffle_ps(add3, add3, 1));
#           endif
            // This SIMD code is a politically correct way of doing this, but in
            // every test I've tried it has been slower than the final code below.
            // I'll keep this here for reference - maybe somebody else can do
            // something better...
            //
            //__m128 xxyy = _mm_shuffle_ps(add4, add5, _MM_SHUFFLE(0, 0, 0, 0));
            //__m128 zzww = _mm_shuffle_ps(add6, add7, _MM_SHUFFLE(0, 0, 0, 0));
            //
            //return _mm_shuffle_ps(xxyy, zzww, _MM_SHUFFLE(2, 0, 2, 0));

            qua<float, Q> Result;
            _mm_store_ss(&Result.x, add4);
            _mm_store_ss(&Result.y, add5);
            _mm_store_ss(&Result.z, add6);
            _mm_store_ss(&Result.w, add7);
            return Result;
        }
    };
*/
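
    // The boolean third template parameter on these compute_* specializations
    // is glm's aligned-SIMD-storage flag: when it is true, qua<float, Q>::data
    // is a raw __m128 (and, under AVX, qua<double, Q>::data is a __m256d), so
    // the quaternion operators dispatch here instead of the scalar fallback.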
    template<qualifier Q>
    struct compute_quat_add<float, Q, true>
    {
        static qua<float, Q> call(qua<float, Q> const& q, qua<float, Q> const& p)
        {
            qua<float, Q> Result;
            Result.data = _mm_add_ps(q.data, p.data);
            return Result;
        }
    };
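
    // The double-precision variants below need AVX: four doubles fill a single
    // 256-bit register, so one _mm256_*_pd instruction handles the whole
    // quaternion, mirroring the single-instruction _mm_*_ps float paths.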
#   if GLM_ARCH & GLM_ARCH_AVX_BIT
    template<qualifier Q>
    struct compute_quat_add<double, Q, true>
    {
        static qua<double, Q> call(qua<double, Q> const& a, qua<double, Q> const& b)
        {
            qua<double, Q> Result;
            Result.data = _mm256_add_pd(a.data, b.data);
            return Result;
        }
    };
#   endif
    template<qualifier Q>
    struct compute_quat_sub<float, Q, true>
    {
        static qua<float, Q> call(qua<float, Q> const& q, qua<float, Q> const& p)
        {
            qua<float, Q> Result;
            Result.data = _mm_sub_ps(q.data, p.data);
            return Result;
        }
    };
#   if GLM_ARCH & GLM_ARCH_AVX_BIT
    template<qualifier Q>
    struct compute_quat_sub<double, Q, true>
    {
        static qua<double, Q> call(qua<double, Q> const& a, qua<double, Q> const& b)
        {
            qua<double, Q> Result;
            Result.data = _mm256_sub_pd(a.data, b.data);
            return Result;
        }
    };
#   endif
    template<qualifier Q>
    struct compute_quat_mul_scalar<float, Q, true>
    {
        static qua<float, Q> call(qua<float, Q> const& q, float s)
        {
            qua<float, Q> Result;
            Result.data = _mm_mul_ps(q.data, _mm_set_ps1(s));
            return Result;
        }
    };
#   if GLM_ARCH & GLM_ARCH_AVX_BIT
    template<qualifier Q>
    struct compute_quat_mul_scalar<double, Q, true>
    {
        static qua<double, Q> call(qua<double, Q> const& q, double s)
        {
            qua<double, Q> Result;
            Result.data = _mm256_mul_pd(q.data, _mm256_set1_pd(s));
            return Result;
        }
    };
#   endif
    template<qualifier Q>
    struct compute_quat_div_scalar<float, Q, true>
    {
        static qua<float, Q> call(qua<float, Q> const& q, float s)
        {
            qua<float, Q> Result;
            Result.data = _mm_div_ps(q.data, _mm_set_ps1(s));
            return Result;
        }
    };
#   if GLM_ARCH & GLM_ARCH_AVX_BIT
    template<qualifier Q>
    struct compute_quat_div_scalar<double, Q, true>
    {
        static qua<double, Q> call(qua<double, Q> const& q, double s)
        {
            qua<double, Q> Result;
            Result.data = _mm256_div_pd(q.data, _mm256_set1_pd(s));
            return Result;
        }
    };
#   endif
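
    // Scalar broadcast pattern: _mm_set_ps1(s) splats one float across all
    // four __m128 lanes (and _mm256_set1_pd splats a double across a __m256d),
    // so each quaternion-by-scalar op is a single vector multiply or divide.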
    template<qualifier Q>
    struct compute_quat_mul_vec4<float, Q, true>
    {
        static vec<4, float, Q> call(qua<float, Q> const& q, vec<4, float, Q> const& v)
        {
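            // Rotates v by q using the cross-product form of the sandwich
            // product q * v * conjugate(q):
            //   uv  = cross(q.xyz, v.xyz)
            //   uuv = cross(q.xyz, uv)
            //   v'  = v + 2w * uv + 2 * uuv
            // The _MM_SHUFFLE(3, 0, 2, 1) and (3, 1, 0, 2) swizzles produce the
            // (y, z, x) and (z, x, y) lane orders a cross product needs.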
            __m128 const q_wwww = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 3, 3, 3));
            __m128 const q_swp0 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 0, 2, 1));
            __m128 const q_swp1 = _mm_shuffle_ps(q.data, q.data, _MM_SHUFFLE(3, 1, 0, 2));
            __m128 const v_swp0 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 0, 2, 1));
            __m128 const v_swp1 = _mm_shuffle_ps(v.data, v.data, _MM_SHUFFLE(3, 1, 0, 2));

            // uv = cross(q.xyz, v.xyz), uuv = cross(q.xyz, uv); the w lanes cancel to 0,
            // so the final add leaves v.w unchanged.
            __m128 uv = _mm_sub_ps(_mm_mul_ps(q_swp0, v_swp1), _mm_mul_ps(q_swp1, v_swp0));
            __m128 uv_swp0 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 0, 2, 1));
            __m128 uv_swp1 = _mm_shuffle_ps(uv, uv, _MM_SHUFFLE(3, 1, 0, 2));
            __m128 uuv = _mm_sub_ps(_mm_mul_ps(q_swp0, uv_swp1), _mm_mul_ps(q_swp1, uv_swp0));

            __m128 const two = _mm_set1_ps(2.0f);
            uv = _mm_mul_ps(uv, _mm_mul_ps(q_wwww, two));
            uuv = _mm_mul_ps(uuv, two);

            vec<4, float, Q> Result;
            Result.data = _mm_add_ps(v.data, _mm_add_ps(uv, uuv));
            return Result;
        }
    };
}//namespace detail
}//namespace glm

#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
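
// Usage sketch (illustrative only; assumes a build with GLM_FORCE_INTRINSICS
// and GLM_FORCE_DEFAULT_ALIGNED_GENTYPES so the aligned, SIMD-backed
// specializations above are the ones selected):
//
//   #define GLM_FORCE_INTRINSICS
//   #define GLM_FORCE_DEFAULT_ALIGNED_GENTYPES
//   #include <glm/glm.hpp>
//   #include <glm/gtc/quaternion.hpp>
//
//   glm::quat q = glm::angleAxis(glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f));
//   glm::quat r = q + q;        // compute_quat_add<float, Q, true>
//   glm::quat s = q * 2.0f;     // compute_quat_mul_scalar<float, Q, true>
//   glm::vec4 v(1.0f, 0.0f, 0.0f, 0.0f);
//   glm::vec4 rotated = q * v;  // compute_quat_mul_vec4<float, Q, true>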