// sse_optimized.cpp
  1. ////////////////////////////////////////////////////////////////////////////////
  2. ///
  3. /// SSE optimized routines for Pentium-III, Athlon-XP and later CPUs. All SSE
  4. /// optimized functions have been gathered into this single source
  5. /// code file, regardless to their class or original source code file, in order
  6. /// to ease porting the library to other compiler and processor platforms.
  7. ///
  8. /// The SSE-optimizations are programmed using SSE compiler intrinsics that
  9. /// are supported both by Microsoft Visual C++ and GCC compilers, so this file
  10. /// should compile with both toolsets.
  11. ///
  12. /// NOTICE: If using Visual Studio 6.0, you'll need to install the "Visual C++
  13. /// 6.0 processor pack" update to support SSE instruction set. The update is
  14. /// available for download at Microsoft Developers Network, see here:
  15. /// http://msdn.microsoft.com/en-us/vstudio/aa718349.aspx
  16. ///
  17. /// If the above URL is expired or removed, go to "http://msdn.microsoft.com" and
  18. /// perform a search with keywords "processor pack".
  19. ///
  20. /// Author : Copyright (c) Olli Parviainen
  21. /// Author e-mail : oparviai 'at' iki.fi
  22. /// SoundTouch WWW: http://www.surina.net/soundtouch
  23. ///
  24. ////////////////////////////////////////////////////////////////////////////////
  25. //
  26. // Last changed : $Date: 2012-11-08 20:53:01 +0200 (Thu, 08 Nov 2012) $
  27. // File revision : $Revision: 4 $
  28. //
  29. // $Id: sse_optimized.cpp 160 2012-11-08 18:53:01Z oparviai $
  30. //
  31. ////////////////////////////////////////////////////////////////////////////////
  32. //
  33. // License :
  34. //
  35. // SoundTouch audio processing library
  36. // Copyright (c) Olli Parviainen
  37. //
  38. // This library is free software; you can redistribute it and/or
  39. // modify it under the terms of the GNU Lesser General Public
  40. // License as published by the Free Software Foundation; either
  41. // version 2.1 of the License, or (at your option) any later version.
  42. //
  43. // This library is distributed in the hope that it will be useful,
  44. // but WITHOUT ANY WARRANTY; without even the implied warranty of
  45. // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  46. // Lesser General Public License for more details.
  47. //
  48. // You should have received a copy of the GNU Lesser General Public
  49. // License along with this library; if not, write to the Free Software
  50. // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  51. //
  52. ////////////////////////////////////////////////////////////////////////////////
  53. #include "cpu_detect.h"
  54. #include "STTypes.h"
  55. using namespace soundtouch;
  56. #ifdef SOUNDTOUCH_ALLOW_SSE
  57. // SSE routines available only with float sample type
  58. //////////////////////////////////////////////////////////////////////////////
  59. //
  60. // implementation of SSE optimized functions of class 'TDStretchSSE'
  61. //
  62. //////////////////////////////////////////////////////////////////////////////
  63. #include "TDStretch.h"
  64. #include <xmmintrin.h>
  65. #include <math.h>
  66. // Calculates cross correlation of two buffers
  67. double TDStretchSSE::calcCrossCorr(const float *pV1, const float *pV2) const
  68. {
  69. int i;
  70. const float *pVec1;
  71. const __m128 *pVec2;
  72. __m128 vSum, vNorm;
  73. // Note. It means a major slow-down if the routine needs to tolerate
  74. // unaligned __m128 memory accesses. It's way faster if we can skip
  75. // unaligned slots and use _mm_load_ps instruction instead of _mm_loadu_ps.
  76. // This can mean up to ~ 10-fold difference (incl. part of which is
  77. // due to skipping every second round for stereo sound though).
  78. //
  79. // Compile-time define SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION is provided
  80. // for choosing if this little cheating is allowed.
  81. #ifdef SOUNDTOUCH_ALLOW_NONEXACT_SIMD_OPTIMIZATION
  82. // Little cheating allowed, return valid correlation only for
  83. // aligned locations, meaning every second round for stereo sound.
  84. #define _MM_LOAD _mm_load_ps
  85. if (((ulongptr)pV1) & 15) return -1e50; // skip unaligned locations
  86. #else
  87. // No cheating allowed, use unaligned load & take the resulting
  88. // performance hit.
  89. #define _MM_LOAD _mm_loadu_ps
  90. #endif
  91. // ensure overlapLength is divisible by 8
  92. assert((overlapLength % 8) == 0);
  93. // Calculates the cross-correlation value between 'pV1' and 'pV2' vectors
  94. // Note: pV2 _must_ be aligned to 16-bit boundary, pV1 need not.
  95. pVec1 = (const float*)pV1;
  96. pVec2 = (const __m128*)pV2;
  97. vSum = vNorm = _mm_setzero_ps();
  98. // Unroll the loop by factor of 4 * 4 operations. Use same routine for
  99. // stereo & mono, for mono it just means twice the amount of unrolling.
  100. for (i = 0; i < channels * overlapLength / 16; i ++)
  101. {
  102. __m128 vTemp;
  103. // vSum += pV1[0..3] * pV2[0..3]
  104. vTemp = _MM_LOAD(pVec1);
  105. vSum = _mm_add_ps(vSum, _mm_mul_ps(vTemp ,pVec2[0]));
  106. vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp ,vTemp));
  107. // vSum += pV1[4..7] * pV2[4..7]
  108. vTemp = _MM_LOAD(pVec1 + 4);
  109. vSum = _mm_add_ps(vSum, _mm_mul_ps(vTemp, pVec2[1]));
  110. vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp ,vTemp));
  111. // vSum += pV1[8..11] * pV2[8..11]
  112. vTemp = _MM_LOAD(pVec1 + 8);
  113. vSum = _mm_add_ps(vSum, _mm_mul_ps(vTemp, pVec2[2]));
  114. vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp ,vTemp));
  115. // vSum += pV1[12..15] * pV2[12..15]
  116. vTemp = _MM_LOAD(pVec1 + 12);
  117. vSum = _mm_add_ps(vSum, _mm_mul_ps(vTemp, pVec2[3]));
  118. vNorm = _mm_add_ps(vNorm, _mm_mul_ps(vTemp ,vTemp));
  119. pVec1 += 16;
  120. pVec2 += 4;
  121. }
  122. // return value = vSum[0] + vSum[1] + vSum[2] + vSum[3]
  123. float *pvNorm = (float*)&vNorm;
  124. double norm = sqrt(pvNorm[0] + pvNorm[1] + pvNorm[2] + pvNorm[3]);
  125. if (norm < 1e-9) norm = 1.0; // to avoid div by zero
  126. float *pvSum = (float*)&vSum;
  127. return (double)(pvSum[0] + pvSum[1] + pvSum[2] + pvSum[3]) / norm;
  128. /* This is approximately corresponding routine in C-language yet without normalization:
  129. double corr, norm;
  130. uint i;
  131. // Calculates the cross-correlation value between 'pV1' and 'pV2' vectors
  132. corr = norm = 0.0;
  133. for (i = 0; i < channels * overlapLength / 16; i ++)
  134. {
  135. corr += pV1[0] * pV2[0] +
  136. pV1[1] * pV2[1] +
  137. pV1[2] * pV2[2] +
  138. pV1[3] * pV2[3] +
  139. pV1[4] * pV2[4] +
  140. pV1[5] * pV2[5] +
  141. pV1[6] * pV2[6] +
  142. pV1[7] * pV2[7] +
  143. pV1[8] * pV2[8] +
  144. pV1[9] * pV2[9] +
  145. pV1[10] * pV2[10] +
  146. pV1[11] * pV2[11] +
  147. pV1[12] * pV2[12] +
  148. pV1[13] * pV2[13] +
  149. pV1[14] * pV2[14] +
  150. pV1[15] * pV2[15];
  151. for (j = 0; j < 15; j ++) norm += pV1[j] * pV1[j];
  152. pV1 += 16;
  153. pV2 += 16;
  154. }
  155. return corr / sqrt(norm);
  156. */
  157. }
  158. //////////////////////////////////////////////////////////////////////////////
  159. //
  160. // implementation of SSE optimized functions of class 'FIRFilter'
  161. //
  162. //////////////////////////////////////////////////////////////////////////////
  163. #include "FIRFilter.h"
  164. FIRFilterSSE::FIRFilterSSE() : FIRFilter()
  165. {
  166. filterCoeffsAlign = NULL;
  167. filterCoeffsUnalign = NULL;
  168. }
  169. FIRFilterSSE::~FIRFilterSSE()
  170. {
  171. delete[] filterCoeffsUnalign;
  172. filterCoeffsAlign = NULL;
  173. filterCoeffsUnalign = NULL;
  174. }
  175. // (overloaded) Calculates filter coefficients for SSE routine
  176. void FIRFilterSSE::setCoefficients(const float *coeffs, uint newLength, uint uResultDivFactor)
  177. {
  178. uint i;
  179. float fDivider;
  180. FIRFilter::setCoefficients(coeffs, newLength, uResultDivFactor);
  181. // Scale the filter coefficients so that it won't be necessary to scale the filtering result
  182. // also rearrange coefficients suitably for SSE
  183. // Ensure that filter coeffs array is aligned to 16-byte boundary
  184. delete[] filterCoeffsUnalign;
  185. filterCoeffsUnalign = new float[2 * newLength + 4];
  186. filterCoeffsAlign = (float *)SOUNDTOUCH_ALIGN_POINTER_16(filterCoeffsUnalign);
  187. fDivider = (float)resultDivider;
  188. // rearrange the filter coefficients for mmx routines
  189. for (i = 0; i < newLength; i ++)
  190. {
  191. filterCoeffsAlign[2 * i + 0] =
  192. filterCoeffsAlign[2 * i + 1] = coeffs[i + 0] / fDivider;
  193. }
  194. }
// SSE-optimized version of the filter routine for stereo sound.
//
// Evaluates the FIR filter over interleaved stereo samples in 'source' and
// writes the filtered stereo samples to 'dest'. Returns the number of output
// samples produced (always even; two stereo frames are computed per loop pass).
uint FIRFilterSSE::evaluateFilterStereo(float *dest, const float *source, uint numSamples) const
{
    // Even number of producible output samples: '& (uint)-2' clears the low bit.
    // NOTE(review): if numSamples < length the unsigned subtraction wraps; the
    // 'count < 2' guard below relies on the cast-to-int result being negative
    // in that case -- confirm this is the intended protection.
    int count = (int)((numSamples - length) & (uint)-2);
    int j;

    assert(count % 2 == 0);
    if (count < 2) return 0;

    assert(source != NULL);
    assert(dest != NULL);
    // Inner loop consumes the coefficient table in chunks of 4 __m128 vectors
    // (16 floats = 8 duplicated coefficient pairs), hence length % 8 == 0.
    assert((length % 8) == 0);
    assert(filterCoeffsAlign != NULL);
    // Coefficients are read with aligned loads (direct __m128 dereference),
    // so the table must sit on a 16-byte boundary.
    assert(((ulongptr)filterCoeffsAlign) % 16 == 0);

    // filter is evaluated for two stereo samples with each iteration, thus use of 'j += 2'
    for (j = 0; j < count; j += 2)
    {
        const float *pSrc;       // cursor into source audio data (unaligned loads)
        const __m128 *pFil;      // cursor into aligned, pair-duplicated coefficients
        __m128 sum1, sum2;
        uint i;

        pSrc = (const float*)source;              // source audio data
        pFil = (const __m128*)filterCoeffsAlign;  // filter coefficients. NOTE: Assumes coefficients
                                                  // are aligned to 16-byte boundary
        sum1 = sum2 = _mm_setzero_ps();

        for (i = 0; i < length / 8; i ++)
        {
            // Unroll loop for efficiency & calculate filter for 2*2 stereo samples
            // at each pass.

            // sum1 is accu for 2*2 filtered stereo sound data at the primary sound data offset;
            // sum2 is accu for 2*2 filtered stereo sound data for the next sound sample offset
            // (source shifted by one stereo frame = 2 floats).

            sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc)     , pFil[0]));
            sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 2) , pFil[0]));

            sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc + 4) , pFil[1]));
            sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 6) , pFil[1]));

            sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc + 8) , pFil[2]));
            sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 10), pFil[2]));

            sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(pSrc + 12), pFil[3]));
            sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(pSrc + 14), pFil[3]));

            pSrc += 16;
            pFil += 4;
        }

        // Now sum1 and sum2 both have a filtered 2-channel sample each, but we still need
        // to sum the two hi- and lo-floats of these registers together.

        // post-shuffle & add the filtered values and store to dest.
        _mm_storeu_ps(dest, _mm_add_ps(
                    _mm_shuffle_ps(sum1, sum2, _MM_SHUFFLE(1,0,3,2)),   // s2_1 s2_0 s1_3 s1_2
                    _mm_shuffle_ps(sum1, sum2, _MM_SHUFFLE(3,2,1,0))    // s2_3 s2_2 s1_1 s1_0
                    ));
        // Advance by the two stereo frames (4 floats) just produced.
        source += 4;
        dest += 4;
    }

    // Ideas for further improvement:
    // 1. If it could be guaranteed that 'source' were always aligned to 16-byte
    //    boundary, a faster aligned '_mm_load_ps' instruction could be used.
    // 2. If it could be guaranteed that 'dest' were always aligned to 16-byte
    //    boundary, a faster '_mm_store_ps' instruction could be used.
    return (uint)count;

    /* original routine in C-language. please notice the C-version has differently
       organized coefficients though.
    double suml1, suml2;
    double sumr1, sumr2;
    uint i, j;

    for (j = 0; j < count; j += 2)
    {
        const float *ptr;
        const float *pFil;

        suml1 = sumr1 = 0.0;
        suml2 = sumr2 = 0.0;
        ptr = src;
        pFil = filterCoeffs;
        for (i = 0; i < lengthLocal; i ++)
        {
            // unroll loop for efficiency.

            suml1 += ptr[0] * pFil[0] +
                     ptr[2] * pFil[2] +
                     ptr[4] * pFil[4] +
                     ptr[6] * pFil[6];

            sumr1 += ptr[1] * pFil[1] +
                     ptr[3] * pFil[3] +
                     ptr[5] * pFil[5] +
                     ptr[7] * pFil[7];

            suml2 += ptr[8] * pFil[0] +
                     ptr[10] * pFil[2] +
                     ptr[12] * pFil[4] +
                     ptr[14] * pFil[6];

            sumr2 += ptr[9] * pFil[1] +
                     ptr[11] * pFil[3] +
                     ptr[13] * pFil[5] +
                     ptr[15] * pFil[7];

            ptr += 16;
            pFil += 8;
        }
        dest[0] = (float)suml1;
        dest[1] = (float)sumr1;
        dest[2] = (float)suml2;
        dest[3] = (float)sumr2;

        src += 4;
        dest += 4;
    }
    */
}
  295. #endif // SOUNDTOUCH_ALLOW_SSE