/* core_cm4_simd.h */
/**************************************************************************//**
 * @file     core_cm4_simd.h
 * @brief    CMSIS Cortex-M4 SIMD Header File
 * @version  V3.30
 * @date     17. February 2014
 *
 * @note
 *
 ******************************************************************************/
/* Copyright (c) 2009 - 2014 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/
  35. #if defined ( __ICCARM__ )
  36. #pragma system_include /* treat file as system include file for MISRA check */
  37. #endif
  38. #ifndef __CORE_CM4_SIMD_H
  39. #define __CORE_CM4_SIMD_H
  40. #ifdef __cplusplus
  41. extern "C" {
  42. #endif
  43. /*******************************************************************************
  44. * Hardware Abstraction Layer
  45. ******************************************************************************/
  46. /* ################### Compiler specific Intrinsics ########################### */
  47. /** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  48. Access to dedicated SIMD instructions
  49. @{
  50. */
  51. #if defined ( __CC_ARM ) /*------------------RealView Compiler -----------------*/
  52. /* ARM armcc specific functions */
  53. #define __SADD8 __sadd8
  54. #define __QADD8 __qadd8
  55. #define __SHADD8 __shadd8
  56. #define __UADD8 __uadd8
  57. #define __UQADD8 __uqadd8
  58. #define __UHADD8 __uhadd8
  59. #define __SSUB8 __ssub8
  60. #define __QSUB8 __qsub8
  61. #define __SHSUB8 __shsub8
  62. #define __USUB8 __usub8
  63. #define __UQSUB8 __uqsub8
  64. #define __UHSUB8 __uhsub8
  65. #define __SADD16 __sadd16
  66. #define __QADD16 __qadd16
  67. #define __SHADD16 __shadd16
  68. #define __UADD16 __uadd16
  69. #define __UQADD16 __uqadd16
  70. #define __UHADD16 __uhadd16
  71. #define __SSUB16 __ssub16
  72. #define __QSUB16 __qsub16
  73. #define __SHSUB16 __shsub16
  74. #define __USUB16 __usub16
  75. #define __UQSUB16 __uqsub16
  76. #define __UHSUB16 __uhsub16
  77. #define __SASX __sasx
  78. #define __QASX __qasx
  79. #define __SHASX __shasx
  80. #define __UASX __uasx
  81. #define __UQASX __uqasx
  82. #define __UHASX __uhasx
  83. #define __SSAX __ssax
  84. #define __QSAX __qsax
  85. #define __SHSAX __shsax
  86. #define __USAX __usax
  87. #define __UQSAX __uqsax
  88. #define __UHSAX __uhsax
  89. #define __USAD8 __usad8
  90. #define __USADA8 __usada8
  91. #define __SSAT16 __ssat16
  92. #define __USAT16 __usat16
  93. #define __UXTB16 __uxtb16
  94. #define __UXTAB16 __uxtab16
  95. #define __SXTB16 __sxtb16
  96. #define __SXTAB16 __sxtab16
  97. #define __SMUAD __smuad
  98. #define __SMUADX __smuadx
  99. #define __SMLAD __smlad
  100. #define __SMLADX __smladx
  101. #define __SMLALD __smlald
  102. #define __SMLALDX __smlaldx
  103. #define __SMUSD __smusd
  104. #define __SMUSDX __smusdx
  105. #define __SMLSD __smlsd
  106. #define __SMLSDX __smlsdx
  107. #define __SMLSLD __smlsld
  108. #define __SMLSLDX __smlsldx
  109. #define __SEL __sel
  110. #define __QADD __qadd
  111. #define __QSUB __qsub
  112. #define __PKHBT(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0x0000FFFFUL) | \
  113. ((((uint32_t)(ARG2)) << (ARG3)) & 0xFFFF0000UL) )
  114. #define __PKHTB(ARG1,ARG2,ARG3) ( ((((uint32_t)(ARG1)) ) & 0xFFFF0000UL) | \
  115. ((((uint32_t)(ARG2)) >> (ARG3)) & 0x0000FFFFUL) )
  116. #define __SMMLA(ARG1,ARG2,ARG3) ( (int32_t)((((int64_t)(ARG1) * (ARG2)) + \
  117. ((int64_t)(ARG3) << 32) ) >> 32))
  118. #elif defined ( __GNUC__ ) /*------------------ GNU Compiler ---------------------*/
  119. /* GNU gcc specific functions */
  120. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
  121. {
  122. uint32_t result;
  123. __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  124. return(result);
  125. }
  126. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
  127. {
  128. uint32_t result;
  129. __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  130. return(result);
  131. }
  132. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
  133. {
  134. uint32_t result;
  135. __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  136. return(result);
  137. }
  138. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
  139. {
  140. uint32_t result;
  141. __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  142. return(result);
  143. }
  144. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
  145. {
  146. uint32_t result;
  147. __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  148. return(result);
  149. }
  150. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
  151. {
  152. uint32_t result;
  153. __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  154. return(result);
  155. }
  156. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
  157. {
  158. uint32_t result;
  159. __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  160. return(result);
  161. }
  162. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
  163. {
  164. uint32_t result;
  165. __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  166. return(result);
  167. }
  168. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
  169. {
  170. uint32_t result;
  171. __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  172. return(result);
  173. }
  174. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
  175. {
  176. uint32_t result;
  177. __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  178. return(result);
  179. }
  180. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
  181. {
  182. uint32_t result;
  183. __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  184. return(result);
  185. }
  186. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
  187. {
  188. uint32_t result;
  189. __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  190. return(result);
  191. }
  192. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
  193. {
  194. uint32_t result;
  195. __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  196. return(result);
  197. }
  198. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
  199. {
  200. uint32_t result;
  201. __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  202. return(result);
  203. }
  204. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
  205. {
  206. uint32_t result;
  207. __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  208. return(result);
  209. }
  210. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
  211. {
  212. uint32_t result;
  213. __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  214. return(result);
  215. }
  216. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
  217. {
  218. uint32_t result;
  219. __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  220. return(result);
  221. }
  222. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
  223. {
  224. uint32_t result;
  225. __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  226. return(result);
  227. }
  228. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
  229. {
  230. uint32_t result;
  231. __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  232. return(result);
  233. }
  234. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
  235. {
  236. uint32_t result;
  237. __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  238. return(result);
  239. }
  240. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
  241. {
  242. uint32_t result;
  243. __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  244. return(result);
  245. }
  246. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
  247. {
  248. uint32_t result;
  249. __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  250. return(result);
  251. }
  252. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
  253. {
  254. uint32_t result;
  255. __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  256. return(result);
  257. }
  258. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
  259. {
  260. uint32_t result;
  261. __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  262. return(result);
  263. }
  264. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
  265. {
  266. uint32_t result;
  267. __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  268. return(result);
  269. }
  270. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
  271. {
  272. uint32_t result;
  273. __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  274. return(result);
  275. }
  276. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
  277. {
  278. uint32_t result;
  279. __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  280. return(result);
  281. }
  282. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
  283. {
  284. uint32_t result;
  285. __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  286. return(result);
  287. }
  288. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
  289. {
  290. uint32_t result;
  291. __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  292. return(result);
  293. }
  294. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
  295. {
  296. uint32_t result;
  297. __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  298. return(result);
  299. }
  300. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
  301. {
  302. uint32_t result;
  303. __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  304. return(result);
  305. }
  306. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
  307. {
  308. uint32_t result;
  309. __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  310. return(result);
  311. }
  312. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
  313. {
  314. uint32_t result;
  315. __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  316. return(result);
  317. }
  318. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
  319. {
  320. uint32_t result;
  321. __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  322. return(result);
  323. }
  324. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
  325. {
  326. uint32_t result;
  327. __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  328. return(result);
  329. }
  330. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
  331. {
  332. uint32_t result;
  333. __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  334. return(result);
  335. }
  336. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
  337. {
  338. uint32_t result;
  339. __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  340. return(result);
  341. }
  342. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
  343. {
  344. uint32_t result;
  345. __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  346. return(result);
  347. }
  348. #define __SSAT16(ARG1,ARG2) \
  349. ({ \
  350. uint32_t __RES, __ARG1 = (ARG1); \
  351. __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  352. __RES; \
  353. })
  354. #define __USAT16(ARG1,ARG2) \
  355. ({ \
  356. uint32_t __RES, __ARG1 = (ARG1); \
  357. __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  358. __RES; \
  359. })
  360. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
  361. {
  362. uint32_t result;
  363. __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  364. return(result);
  365. }
  366. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
  367. {
  368. uint32_t result;
  369. __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  370. return(result);
  371. }
  372. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
  373. {
  374. uint32_t result;
  375. __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  376. return(result);
  377. }
  378. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
  379. {
  380. uint32_t result;
  381. __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  382. return(result);
  383. }
  384. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD (uint32_t op1, uint32_t op2)
  385. {
  386. uint32_t result;
  387. __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  388. return(result);
  389. }
  390. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
  391. {
  392. uint32_t result;
  393. __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  394. return(result);
  395. }
  396. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
  397. {
  398. uint32_t result;
  399. __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  400. return(result);
  401. }
  402. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
  403. {
  404. uint32_t result;
  405. __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  406. return(result);
  407. }
  408. __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
  409. {
  410. union llreg_u{
  411. uint32_t w32[2];
  412. uint64_t w64;
  413. } llr;
  414. llr.w64 = acc;
  415. #ifndef __ARMEB__ // Little endian
  416. __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
  417. #else // Big endian
  418. __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
  419. #endif
  420. return(llr.w64);
  421. }
  422. __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
  423. {
  424. union llreg_u{
  425. uint32_t w32[2];
  426. uint64_t w64;
  427. } llr;
  428. llr.w64 = acc;
  429. #ifndef __ARMEB__ // Little endian
  430. __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
  431. #else // Big endian
  432. __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
  433. #endif
  434. return(llr.w64);
  435. }
  436. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD (uint32_t op1, uint32_t op2)
  437. {
  438. uint32_t result;
  439. __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  440. return(result);
  441. }
  442. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
  443. {
  444. uint32_t result;
  445. __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  446. return(result);
  447. }
  448. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
  449. {
  450. uint32_t result;
  451. __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  452. return(result);
  453. }
  454. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
  455. {
  456. uint32_t result;
  457. __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  458. return(result);
  459. }
  460. __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
  461. {
  462. union llreg_u{
  463. uint32_t w32[2];
  464. uint64_t w64;
  465. } llr;
  466. llr.w64 = acc;
  467. #ifndef __ARMEB__ // Little endian
  468. __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
  469. #else // Big endian
  470. __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
  471. #endif
  472. return(llr.w64);
  473. }
  474. __attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
  475. {
  476. union llreg_u{
  477. uint32_t w32[2];
  478. uint64_t w64;
  479. } llr;
  480. llr.w64 = acc;
  481. #ifndef __ARMEB__ // Little endian
  482. __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
  483. #else // Big endian
  484. __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
  485. #endif
  486. return(llr.w64);
  487. }
  488. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL (uint32_t op1, uint32_t op2)
  489. {
  490. uint32_t result;
  491. __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  492. return(result);
  493. }
  494. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD(uint32_t op1, uint32_t op2)
  495. {
  496. uint32_t result;
  497. __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  498. return(result);
  499. }
  500. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB(uint32_t op1, uint32_t op2)
  501. {
  502. uint32_t result;
  503. __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  504. return(result);
  505. }
  506. #define __PKHBT(ARG1,ARG2,ARG3) \
  507. ({ \
  508. uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  509. __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  510. __RES; \
  511. })
  512. #define __PKHTB(ARG1,ARG2,ARG3) \
  513. ({ \
  514. uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  515. if (ARG3 == 0) \
  516. __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  517. else \
  518. __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  519. __RES; \
  520. })
  521. __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
  522. {
  523. int32_t result;
  524. __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  525. return(result);
  526. }
  527. #elif defined ( __ICCARM__ ) /*------------------ ICC Compiler -------------------*/
  528. /* IAR iccarm specific functions */
  529. #include <cmsis_iar.h>
  530. #elif defined ( __TMS470__ ) /*---------------- TI CCS Compiler ------------------*/
  531. /* TI CCS specific functions */
  532. #include <cmsis_ccs.h>
  533. #elif defined ( __TASKING__ ) /*------------------ TASKING Compiler --------------*/
  534. /* TASKING carm specific functions */
  535. /* not yet supported */
  536. #elif defined ( __CSMC__ ) /*------------------ COSMIC Compiler -------------------*/
  537. /* Cosmic specific functions */
  538. #include <cmsis_csm.h>
  539. #endif
  540. /*@} end of group CMSIS_SIMD_intrinsics */
  541. #ifdef __cplusplus
  542. }
  543. #endif
  544. #endif /* __CORE_CM4_SIMD_H */