/**************************************************************************//**
 * @file     cmsis_gcc.h
 * @brief    CMSIS Cortex-M Core Function/Instruction Header File
 * @version  V4.30
 * @date     20. October 2015
 ******************************************************************************/
/* Copyright (c) 2009 - 2015 ARM LIMITED

   All rights reserved.
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are met:
   - Redistributions of source code must retain the above copyright
     notice, this list of conditions and the following disclaimer.
   - Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the following disclaimer in the
     documentation and/or other materials provided with the distribution.
   - Neither the name of ARM nor the names of its contributors may be used
     to endorse or promote products derived from this software without
     specific prior written permission.
   *
   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS AND CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   POSSIBILITY OF SUCH DAMAGE.
   ---------------------------------------------------------------------------*/


#ifndef __CMSIS_GCC_H
#define __CMSIS_GCC_H

/* ignore some GCC warnings */
#if defined ( __GNUC__ )
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
#pragma GCC diagnostic ignored "-Wconversion"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif


/* ###########################  Core Function Access  ########################### */
/** \ingroup  CMSIS_Core_FunctionInterface
    \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions
  @{
 */

/**
  \brief   Enable IRQ Interrupts
  \details Enables IRQ interrupts by clearing the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_irq(void)
{
  __ASM volatile ("cpsie i" : : : "memory");
}


/**
  \brief   Disable IRQ Interrupts
  \details Disables IRQ interrupts by setting the I-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_irq(void)
{
  __ASM volatile ("cpsid i" : : : "memory");
}


/**
  \brief   Get Control Register
  \details Returns the content of the Control Register.
  \return  Control Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_CONTROL(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, control" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Control Register
  \details Writes the given value to the Control Register.
  \param [in]    control  Control Register value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_CONTROL(uint32_t control)
{
  __ASM volatile ("MSR control, %0" : : "r" (control) : "memory");
}
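
/* Usage sketch (illustrative, not part of CMSIS): switch Thread mode to the
   process stack and drop to unprivileged execution. The CONTROL bit layout
   (bit 0 = nPRIV, bit 1 = SPSEL) is architectural; an ISB afterwards is the
   recommended way to make the change take effect before the next instruction.

     __set_PSP((uint32_t)&process_stack[PROCESS_STACK_WORDS]);  // hypothetical stack array
     __set_CONTROL(__get_CONTROL() | 0x03U);                    // SPSEL = 1, nPRIV = 1
     __ISB();
*/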


/**
  \brief   Get IPSR Register
  \details Returns the content of the IPSR Register.
  \return  IPSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_IPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, ipsr" : "=r" (result) );
  return(result);
}
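
/* Usage sketch (illustrative, not part of CMSIS): IPSR reads 0 in Thread mode
   and holds the active exception number in Handler mode, so it can be used to
   detect whether code is currently running inside an interrupt/exception handler.

     if (__get_IPSR() != 0U)
     {
         // running from an exception handler (e.g. an ISR)
     }
*/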


/**
  \brief   Get APSR Register
  \details Returns the content of the APSR Register.
  \return  APSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_APSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, apsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get xPSR Register
  \details Returns the content of the xPSR Register.
  \return  xPSR Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_xPSR(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, xpsr" : "=r" (result) );
  return(result);
}


/**
  \brief   Get Process Stack Pointer
  \details Returns the current value of the Process Stack Pointer (PSP).
  \return  PSP Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, psp\n"  : "=r" (result) );
  return(result);
}


/**
  \brief   Set Process Stack Pointer
  \details Assigns the given value to the Process Stack Pointer (PSP).
  \param [in]    topOfProcStack  Process Stack Pointer value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PSP(uint32_t topOfProcStack)
{
  __ASM volatile ("MSR psp, %0\n" : : "r" (topOfProcStack) : "sp");
}


/**
  \brief   Get Main Stack Pointer
  \details Returns the current value of the Main Stack Pointer (MSP).
  \return  MSP Register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_MSP(void)
{
  register uint32_t result;

  __ASM volatile ("MRS %0, msp\n" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Main Stack Pointer
  \details Assigns the given value to the Main Stack Pointer (MSP).
  \param [in]    topOfMainStack  Main Stack Pointer value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_MSP(uint32_t topOfMainStack)
{
  __ASM volatile ("MSR msp, %0\n" : : "r" (topOfMainStack) : "sp");
}


/**
  \brief   Get Priority Mask
  \details Returns the current state of the priority mask bit from the Priority Mask Register.
  \return  Priority Mask value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_PRIMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, primask" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Priority Mask
  \details Assigns the given value to the Priority Mask Register.
  \param [in]    priMask  Priority Mask
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_PRIMASK(uint32_t priMask)
{
  __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory");
}
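
/* Usage sketch (illustrative, not part of CMSIS): a simple critical section
   that saves PRIMASK, masks all configurable-priority interrupts, and restores
   the previous state on exit, so it also nests safely.

     uint32_t primask = __get_PRIMASK();
     __disable_irq();
     // ... code that must not be preempted by interrupts ...
     __set_PRIMASK(primask);
*/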


#if (__CORTEX_M >= 0x03U)

/**
  \brief   Enable FIQ
  \details Enables FIQ interrupts by clearing the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __enable_fault_irq(void)
{
  __ASM volatile ("cpsie f" : : : "memory");
}


/**
  \brief   Disable FIQ
  \details Disables FIQ interrupts by setting the F-bit in the CPSR.
           Can only be executed in Privileged modes.
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __disable_fault_irq(void)
{
  __ASM volatile ("cpsid f" : : : "memory");
}


/**
  \brief   Get Base Priority
  \details Returns the current value of the Base Priority register.
  \return  Base Priority register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_BASEPRI(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, basepri" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Base Priority
  \details Assigns the given value to the Base Priority register.
  \param [in]    basePri  Base Priority value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI(uint32_t value)
{
  __ASM volatile ("MSR basepri, %0" : : "r" (value) : "memory");
}


/**
  \brief   Set Base Priority with condition
  \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled,
           or the new value increases the BASEPRI priority level.
  \param [in]    basePri  Base Priority value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_BASEPRI_MAX(uint32_t value)
{
  __ASM volatile ("MSR basepri_max, %0" : : "r" (value) : "memory");
}
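
/* Usage sketch (illustrative, not part of CMSIS): a priority-based critical
   section that only masks interrupts at or below a chosen priority level while
   leaving more urgent interrupts enabled. BASEPRI expects the level already
   shifted into the implemented priority bits; __NVIC_PRIO_BITS comes from the
   device header and is assumed here.

     uint32_t basepri = __get_BASEPRI();
     __set_BASEPRI_MAX(5U << (8U - __NVIC_PRIO_BITS));
     // ... code protected against interrupts of priority 5 and less urgent ...
     __set_BASEPRI(basepri);
*/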


/**
  \brief   Get Fault Mask
  \details Returns the current value of the Fault Mask register.
  \return  Fault Mask register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FAULTMASK(void)
{
  uint32_t result;

  __ASM volatile ("MRS %0, faultmask" : "=r" (result) );
  return(result);
}


/**
  \brief   Set Fault Mask
  \details Assigns the given value to the Fault Mask register.
  \param [in]    faultMask  Fault Mask value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask)
{
  __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory");
}

#endif /* (__CORTEX_M >= 0x03U) */


#if (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U)

/**
  \brief   Get FPSCR
  \details Returns the current value of the Floating Point Status/Control register.
  \return  Floating Point Status/Control register value
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __get_FPSCR(void)
{
#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
  uint32_t result;

  /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("");
  __ASM volatile ("VMRS %0, fpscr" : "=r" (result) );
  __ASM volatile ("");
  return(result);
#else
  return(0);
#endif
}


/**
  \brief   Set FPSCR
  \details Assigns the given value to the Floating Point Status/Control register.
  \param [in]    fpscr  Floating Point Status/Control value to set
 */
__attribute__( ( always_inline ) ) __STATIC_INLINE void __set_FPSCR(uint32_t fpscr)
{
#if (__FPU_PRESENT == 1U) && (__FPU_USED == 1U)
  /* Empty asm statement works as a scheduling barrier */
  __ASM volatile ("");
  __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc");
  __ASM volatile ("");
#endif
}
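
/* Usage sketch (illustrative, not part of CMSIS): read and clear the FPSCR
   cumulative exception flags (bits 0..4 and 7: IOC, DZC, OFC, UFC, IXC, IDC)
   after a block of floating-point work.

     uint32_t fpscr = __get_FPSCR();
     if (fpscr & 0x0000009FU)                 // any sticky exception flag set?
     {
         __set_FPSCR(fpscr & ~0x0000009FU);   // clear the sticky flags
     }
*/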

#endif /* (__CORTEX_M == 0x04U) || (__CORTEX_M == 0x07U) */



/*@} end of CMSIS_Core_RegAccFunctions */


/* ##########################  Core Instruction Access  ######################### */
/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface
  Access to dedicated instructions
  @{
*/

/* Define macros for porting to both thumb1 and thumb2.
 * For thumb1, use low register (r0-r7), specified by constraint "l"
 * Otherwise, use general registers, specified by constraint "r" */
#if defined (__thumb__) && !defined (__thumb2__)
#define __CMSIS_GCC_OUT_REG(r) "=l" (r)
#define __CMSIS_GCC_USE_REG(r) "l" (r)
#else
#define __CMSIS_GCC_OUT_REG(r) "=r" (r)
#define __CMSIS_GCC_USE_REG(r) "r" (r)
#endif

/**
  \brief   No Operation
  \details No Operation does nothing. This instruction can be used for code alignment purposes.
 */
__attribute__((always_inline)) __STATIC_INLINE void __NOP(void)
{
  __ASM volatile ("nop");
}


/**
  \brief   Wait For Interrupt
  \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs.
 */
__attribute__((always_inline)) __STATIC_INLINE void __WFI(void)
{
  __ASM volatile ("wfi");
}


/**
  \brief   Wait For Event
  \details Wait For Event is a hint instruction that permits the processor to enter
           a low-power state until one of a number of events occurs.
 */
__attribute__((always_inline)) __STATIC_INLINE void __WFE(void)
{
  __ASM volatile ("wfe");
}


/**
  \brief   Send Event
  \details Send Event is a hint instruction. It causes an event to be signaled to the CPU.
 */
__attribute__((always_inline)) __STATIC_INLINE void __SEV(void)
{
  __ASM volatile ("sev");
}


/**
  \brief   Instruction Synchronization Barrier
  \details Instruction Synchronization Barrier flushes the pipeline in the processor,
           so that all instructions following the ISB are fetched from cache or memory,
           after the instruction has been completed.
 */
__attribute__((always_inline)) __STATIC_INLINE void __ISB(void)
{
  __ASM volatile ("isb 0xF":::"memory");
}


/**
  \brief   Data Synchronization Barrier
  \details Acts as a special kind of Data Memory Barrier.
           It completes when all explicit memory accesses before this instruction complete.
 */
__attribute__((always_inline)) __STATIC_INLINE void __DSB(void)
{
  __ASM volatile ("dsb 0xF":::"memory");
}


/**
  \brief   Data Memory Barrier
  \details Ensures the apparent order of the explicit memory operations before
           and after the instruction, without ensuring their completion.
 */
__attribute__((always_inline)) __STATIC_INLINE void __DMB(void)
{
  __ASM volatile ("dmb 0xF":::"memory");
}
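
/* Usage sketch (illustrative, not part of CMSIS): typical barrier pairing when
   changing system state that affects instruction fetch, e.g. after relocating
   the vector table. The DSB makes sure the write has completed; the ISB flushes
   the pipeline so subsequent instructions see the new state.

     SCB->VTOR = (uint32_t)&vector_table_ram;   // hypothetical relocated table
     __DSB();
     __ISB();
*/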


/**
  \brief   Reverse byte order (32 bit)
  \details Reverses the byte order in integer value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV(uint32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
  return __builtin_bswap32(value);
#else
  uint32_t result;

  __ASM volatile ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}


/**
  \brief   Reverse byte order (16 bit)
  \details Reverses the byte order in two unsigned short values.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __REV16(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}


/**
  \brief   Reverse byte order in signed short value
  \details Reverses the byte order in a signed short value with sign extension to integer.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE int32_t __REVSH(int32_t value)
{
#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  return (short)__builtin_bswap16(value);
#else
  int32_t result;

  __ASM volatile ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
#endif
}


/**
  \brief   Rotate Right in unsigned value (32 bit)
  \details Rotate Right provides the value of the contents of a register rotated by a variable number of bits.
  \param [in]    op1  Value to rotate
  \param [in]    op2  Number of bits to rotate
  \return             Rotated value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __ROR(uint32_t op1, uint32_t op2)
{
  return (op1 >> op2) | (op1 << (32U - op2));
}


/**
  \brief   Breakpoint
  \details Causes the processor to enter Debug state.
           Debug tools can use this to investigate system state when the instruction at a particular address is reached.
  \param [in]    value  is ignored by the processor.
                 If required, a debugger can use it to store additional information about the breakpoint.
 */
#define __BKPT(value)                       __ASM volatile ("bkpt "#value)


/**
  \brief   Reverse bit order of value
  \details Reverses the bit order of the given value.
  \param [in]    value  Value to reverse
  \return               Reversed value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value)
{
  uint32_t result;

#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)
  __ASM volatile ("rbit %0, %1" : "=r" (result) : "r" (value) );
#else
  int32_t s = 4 /*sizeof(v)*/ * 8 - 1; /* extra shift needed at end */

  result = value;                      /* r will be reversed bits of v; first get LSB of v */
  for (value >>= 1U; value; value >>= 1U)
  {
    result <<= 1U;
    result |= value & 1U;
    s--;
  }
  result <<= s;                        /* shift when v's highest bits are zero */
#endif
  return(result);
}


/**
  \brief   Count leading zeros
  \details Counts the number of leading zeros of a data value.
  \param [in]  value  Value to count the leading zeros
  \return             number of leading zeros in value
 */
#define __CLZ             __builtin_clz
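
/* Note (illustrative, not part of CMSIS): GCC's __builtin_clz() has an
   undefined result for an input of 0, even though the ARM CLZ instruction
   itself returns 32. Portable callers can guard the zero case explicitly:

     uint32_t bits = (value == 0U) ? 32U : (uint32_t)__CLZ(value);
*/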


#if (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U)

/**
  \brief   LDR Exclusive (8 bit)
  \details Executes an exclusive LDR instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDREXB(volatile uint8_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern has to be used.
  */
  __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (16 bit)
  \details Executes an exclusive LDR instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDREXH(volatile uint16_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern has to be used.
  */
  __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result);   /* Add explicit type cast here */
}


/**
  \brief   LDR Exclusive (32 bit)
  \details Executes an exclusive LDR instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDREXW(volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}


/**
  \brief   STR Exclusive (8 bit)
  \details Executes an exclusive STR instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/**
  \brief   STR Exclusive (16 bit)
  \details Executes an exclusive STR instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr)
{
  uint32_t result;

  __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) );
  return(result);
}


/**
  \brief   STR Exclusive (32 bit)
  \details Executes an exclusive STR instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
  \return          0  Function succeeded
  \return          1  Function failed
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) );
  return(result);
}
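
/* Usage sketch (illustrative, not part of CMSIS): an atomic 32-bit increment
   built from an exclusive load/store retry loop. __STREXW returns 0 only if
   the store completed without the exclusive monitor being disturbed.

     uint32_t old;
     do {
         old = __LDREXW(&counter);                  // hypothetical shared counter
     } while (__STREXW(old + 1U, &counter) != 0U);
     __DMB();                                       // order the update against later accesses
*/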


/**
  \brief   Remove the exclusive lock
  \details Removes the exclusive lock which is created by LDREX.
 */
__attribute__((always_inline)) __STATIC_INLINE void __CLREX(void)
{
  __ASM volatile ("clrex" ::: "memory");
}


/**
  \brief   Signed Saturate
  \details Saturates a signed value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (1..32)
  \return             Saturated value
 */
#define __SSAT(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
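
/* Usage sketch (illustrative, not part of CMSIS): clamp a 32-bit intermediate
   result into the signed 16-bit range before storing it to an int16_t buffer.
   The second argument must be a compile-time constant in the range 1..32.

     int32_t acc = gain * sample;             // hypothetical wide intermediate value
     int16_t out = (int16_t)__SSAT(acc, 16);  // saturates to [-32768, 32767]
*/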


/**
  \brief   Unsigned Saturate
  \details Saturates an unsigned value.
  \param [in]  value  Value to be saturated
  \param [in]    sat  Bit position to saturate to (0..31)
  \return             Saturated value
 */
#define __USAT(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat %0, %1, %2" : "=r" (__RES) :  "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })


/**
  \brief   Rotate Right with Extend (32 bit)
  \details Moves each bit of a bitstring right by one bit.
           The carry input is shifted in at the left end of the bitstring.
  \param [in]    value  Value to rotate
  \return               Rotated value
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __RRX(uint32_t value)
{
  uint32_t result;

  __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) );
  return(result);
}

/**
  \brief   LDRT Unprivileged (8 bit)
  \details Executes an Unprivileged LDRT instruction for 8 bit value.
  \param [in]    ptr  Pointer to data
  \return             value of type uint8_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint8_t __LDRBT(volatile uint8_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern has to be used.
  */
  __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint8_t) result);    /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (16 bit)
  \details Executes an Unprivileged LDRT instruction for 16 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint16_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint16_t __LDRHT(volatile uint16_t *addr)
{
  uint32_t result;

#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)
  __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*addr) );
#else
  /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not
     accepted by the assembler, so the following less efficient pattern has to be used.
  */
  __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (addr) : "memory" );
#endif
  return ((uint16_t) result);   /* Add explicit type cast here */
}


/**
  \brief   LDRT Unprivileged (32 bit)
  \details Executes an Unprivileged LDRT instruction for 32 bit values.
  \param [in]    ptr  Pointer to data
  \return             value of type uint32_t at (*ptr)
 */
__attribute__((always_inline)) __STATIC_INLINE uint32_t __LDRT(volatile uint32_t *addr)
{
  uint32_t result;

  __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*addr) );
  return(result);
}


/**
  \brief   STRT Unprivileged (8 bit)
  \details Executes an Unprivileged STRT instruction for 8 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRBT(uint8_t value, volatile uint8_t *addr)
{
  __ASM volatile ("strbt %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (16 bit)
  \details Executes an Unprivileged STRT instruction for 16 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRHT(uint16_t value, volatile uint16_t *addr)
{
  __ASM volatile ("strht %1, %0" : "=Q" (*addr) : "r" ((uint32_t)value) );
}


/**
  \brief   STRT Unprivileged (32 bit)
  \details Executes an Unprivileged STRT instruction for 32 bit values.
  \param [in]  value  Value to store
  \param [in]    ptr  Pointer to location
 */
__attribute__((always_inline)) __STATIC_INLINE void __STRT(uint32_t value, volatile uint32_t *addr)
{
  __ASM volatile ("strt %1, %0" : "=Q" (*addr) : "r" (value) );
}

#endif /* (__CORTEX_M >= 0x03U) || (__CORTEX_SC >= 300U) */

/*@}*/ /* end of group CMSIS_Core_InstructionInterface */


/* ###################  Compiler specific Intrinsics  ########################### */
/** \defgroup CMSIS_SIMD_intrinsics CMSIS SIMD Intrinsics
  Access to dedicated SIMD instructions
  @{
*/

#if (__CORTEX_M >= 0x04U)  /* only for Cortex-M4 and above */

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhadd8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
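
/* Usage sketch (illustrative, not part of CMSIS): average two packed groups of
   four 8-bit pixels in one instruction. __UHADD8 adds the corresponding bytes
   of both operands and halves each result, so no per-byte overflow can occur.

     uint32_t row0 = *(const uint32_t *)&src0[i];   // hypothetical pixel rows
     uint32_t row1 = *(const uint32_t *)&src1[i];
     uint32_t avg  = __UHADD8(row0, row1);          // four byte-wise averages
*/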
879*150812a8SEvalZero
880*150812a8SEvalZero
__SSUB8(uint32_t op1,uint32_t op2)881*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB8(uint32_t op1, uint32_t op2)
882*150812a8SEvalZero {
883*150812a8SEvalZero uint32_t result;
884*150812a8SEvalZero
885*150812a8SEvalZero __ASM volatile ("ssub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
886*150812a8SEvalZero return(result);
887*150812a8SEvalZero }
888*150812a8SEvalZero
__QSUB8(uint32_t op1,uint32_t op2)889*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB8(uint32_t op1, uint32_t op2)
890*150812a8SEvalZero {
891*150812a8SEvalZero uint32_t result;
892*150812a8SEvalZero
893*150812a8SEvalZero __ASM volatile ("qsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
894*150812a8SEvalZero return(result);
895*150812a8SEvalZero }
896*150812a8SEvalZero
__SHSUB8(uint32_t op1,uint32_t op2)897*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB8(uint32_t op1, uint32_t op2)
898*150812a8SEvalZero {
899*150812a8SEvalZero uint32_t result;
900*150812a8SEvalZero
901*150812a8SEvalZero __ASM volatile ("shsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
902*150812a8SEvalZero return(result);
903*150812a8SEvalZero }
904*150812a8SEvalZero
__USUB8(uint32_t op1,uint32_t op2)905*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB8(uint32_t op1, uint32_t op2)
906*150812a8SEvalZero {
907*150812a8SEvalZero uint32_t result;
908*150812a8SEvalZero
909*150812a8SEvalZero __ASM volatile ("usub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
910*150812a8SEvalZero return(result);
911*150812a8SEvalZero }
912*150812a8SEvalZero
__UQSUB8(uint32_t op1,uint32_t op2)913*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB8(uint32_t op1, uint32_t op2)
914*150812a8SEvalZero {
915*150812a8SEvalZero uint32_t result;
916*150812a8SEvalZero
917*150812a8SEvalZero __ASM volatile ("uqsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
918*150812a8SEvalZero return(result);
919*150812a8SEvalZero }
920*150812a8SEvalZero
__UHSUB8(uint32_t op1,uint32_t op2)921*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB8(uint32_t op1, uint32_t op2)
922*150812a8SEvalZero {
923*150812a8SEvalZero uint32_t result;
924*150812a8SEvalZero
925*150812a8SEvalZero __ASM volatile ("uhsub8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
926*150812a8SEvalZero return(result);
927*150812a8SEvalZero }
928*150812a8SEvalZero
929*150812a8SEvalZero
__SADD16(uint32_t op1,uint32_t op2)930*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SADD16(uint32_t op1, uint32_t op2)
931*150812a8SEvalZero {
932*150812a8SEvalZero uint32_t result;
933*150812a8SEvalZero
934*150812a8SEvalZero __ASM volatile ("sadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
935*150812a8SEvalZero return(result);
936*150812a8SEvalZero }
937*150812a8SEvalZero
__QADD16(uint32_t op1,uint32_t op2)938*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QADD16(uint32_t op1, uint32_t op2)
939*150812a8SEvalZero {
940*150812a8SEvalZero uint32_t result;
941*150812a8SEvalZero
942*150812a8SEvalZero __ASM volatile ("qadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
943*150812a8SEvalZero return(result);
944*150812a8SEvalZero }
945*150812a8SEvalZero
__SHADD16(uint32_t op1,uint32_t op2)946*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHADD16(uint32_t op1, uint32_t op2)
947*150812a8SEvalZero {
948*150812a8SEvalZero uint32_t result;
949*150812a8SEvalZero
950*150812a8SEvalZero __ASM volatile ("shadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
951*150812a8SEvalZero return(result);
952*150812a8SEvalZero }
953*150812a8SEvalZero
__UADD16(uint32_t op1,uint32_t op2)954*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UADD16(uint32_t op1, uint32_t op2)
955*150812a8SEvalZero {
956*150812a8SEvalZero uint32_t result;
957*150812a8SEvalZero
958*150812a8SEvalZero __ASM volatile ("uadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
959*150812a8SEvalZero return(result);
960*150812a8SEvalZero }
961*150812a8SEvalZero
__UQADD16(uint32_t op1,uint32_t op2)962*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQADD16(uint32_t op1, uint32_t op2)
963*150812a8SEvalZero {
964*150812a8SEvalZero uint32_t result;
965*150812a8SEvalZero
966*150812a8SEvalZero __ASM volatile ("uqadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
967*150812a8SEvalZero return(result);
968*150812a8SEvalZero }
969*150812a8SEvalZero
__UHADD16(uint32_t op1,uint32_t op2)970*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHADD16(uint32_t op1, uint32_t op2)
971*150812a8SEvalZero {
972*150812a8SEvalZero uint32_t result;
973*150812a8SEvalZero
974*150812a8SEvalZero __ASM volatile ("uhadd16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
975*150812a8SEvalZero return(result);
976*150812a8SEvalZero }
977*150812a8SEvalZero
__SSUB16(uint32_t op1,uint32_t op2)978*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSUB16(uint32_t op1, uint32_t op2)
979*150812a8SEvalZero {
980*150812a8SEvalZero uint32_t result;
981*150812a8SEvalZero
982*150812a8SEvalZero __ASM volatile ("ssub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
983*150812a8SEvalZero return(result);
984*150812a8SEvalZero }
985*150812a8SEvalZero
__QSUB16(uint32_t op1,uint32_t op2)986*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSUB16(uint32_t op1, uint32_t op2)
987*150812a8SEvalZero {
988*150812a8SEvalZero uint32_t result;
989*150812a8SEvalZero
990*150812a8SEvalZero __ASM volatile ("qsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
991*150812a8SEvalZero return(result);
992*150812a8SEvalZero }
993*150812a8SEvalZero
__SHSUB16(uint32_t op1,uint32_t op2)994*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSUB16(uint32_t op1, uint32_t op2)
995*150812a8SEvalZero {
996*150812a8SEvalZero uint32_t result;
997*150812a8SEvalZero
998*150812a8SEvalZero __ASM volatile ("shsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
999*150812a8SEvalZero return(result);
1000*150812a8SEvalZero }
1001*150812a8SEvalZero
__USUB16(uint32_t op1,uint32_t op2)1002*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USUB16(uint32_t op1, uint32_t op2)
1003*150812a8SEvalZero {
1004*150812a8SEvalZero uint32_t result;
1005*150812a8SEvalZero
1006*150812a8SEvalZero __ASM volatile ("usub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1007*150812a8SEvalZero return(result);
1008*150812a8SEvalZero }
1009*150812a8SEvalZero
__UQSUB16(uint32_t op1,uint32_t op2)1010*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSUB16(uint32_t op1, uint32_t op2)
1011*150812a8SEvalZero {
1012*150812a8SEvalZero uint32_t result;
1013*150812a8SEvalZero
1014*150812a8SEvalZero __ASM volatile ("uqsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1015*150812a8SEvalZero return(result);
1016*150812a8SEvalZero }
1017*150812a8SEvalZero
__UHSUB16(uint32_t op1,uint32_t op2)1018*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSUB16(uint32_t op1, uint32_t op2)
1019*150812a8SEvalZero {
1020*150812a8SEvalZero uint32_t result;
1021*150812a8SEvalZero
1022*150812a8SEvalZero __ASM volatile ("uhsub16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1023*150812a8SEvalZero return(result);
1024*150812a8SEvalZero }
1025*150812a8SEvalZero
__SASX(uint32_t op1,uint32_t op2)1026*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SASX(uint32_t op1, uint32_t op2)
1027*150812a8SEvalZero {
1028*150812a8SEvalZero uint32_t result;
1029*150812a8SEvalZero
1030*150812a8SEvalZero __ASM volatile ("sasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1031*150812a8SEvalZero return(result);
1032*150812a8SEvalZero }
1033*150812a8SEvalZero
__QASX(uint32_t op1,uint32_t op2)1034*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QASX(uint32_t op1, uint32_t op2)
1035*150812a8SEvalZero {
1036*150812a8SEvalZero uint32_t result;
1037*150812a8SEvalZero
1038*150812a8SEvalZero __ASM volatile ("qasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
1039*150812a8SEvalZero return(result);
1040*150812a8SEvalZero }
1041*150812a8SEvalZero
__SHASX(uint32_t op1,uint32_t op2)1042*150812a8SEvalZero __attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHASX(uint32_t op1, uint32_t op2)
1043*150812a8SEvalZero {
  uint32_t result;

  __ASM volatile ("shasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHASX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhasx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("ssax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __QSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("qsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("shsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UQSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uqsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UHSAX(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uhsax %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USAD8(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("usad8 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __USADA8(uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("usada8 %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
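
/*
  Usage sketch (illustrative only; the helper name below is made up): __USAD8
  returns the sum of absolute differences of the four unsigned bytes packed in
  its operands, and __USADA8 additionally accumulates a third operand, so a
  word-at-a-time SAD over byte buffers (assumed word-aligned, length a
  multiple of four bytes) can be written as:

    static inline uint32_t sad_bytes(const uint32_t *a, const uint32_t *b, uint32_t words)
    {
      uint32_t acc = 0U;
      for (uint32_t i = 0U; i < words; i++)
      {
        acc = __USADA8(a[i], b[i], acc);   // acc += |a0-b0| + |a1-b1| + |a2-b2| + |a3-b3|
      }
      return acc;
    }
*/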

#define __SSAT16(ARG1,ARG2) \
({                          \
  int32_t __RES, __ARG1 = (ARG1); \
  __ASM ("ssat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })

#define __USAT16(ARG1,ARG2) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1); \
  __ASM ("usat16 %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
  __RES; \
 })
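
/*
  Note: the saturation bit position handed to __SSAT16/__USAT16 is emitted via
  the "I" constraint, so it must be a compile-time constant (1..16 for SSAT16,
  0..15 for USAT16). Illustrative use, clamping two packed signed halfwords to
  the 12-bit range [-2048, 2047]:

    uint32_t clamped = __SSAT16(packed_pair, 12);   // 'packed_pair' holds two int16 lanes
*/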

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("uxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __UXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("uxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTB16(uint32_t op1)
{
  uint32_t result;

  __ASM volatile ("sxtb16 %0, %1" : "=r" (result) : "r" (op1));
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SXTAB16(uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sxtab16 %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
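
/*
  __UXTB16/__SXTB16 zero-/sign-extend bytes 0 and 2 of the operand into the two
  result halfwords, and the *AB16 variants add those halfwords to the first
  operand. Illustrative widening of four packed 8-bit samples into two halfword
  pairs, using __ROR from this header:

    // in = 0xDDCCBBAA  ->  even = 0x00CC00AA,  odd = 0x00DD00BB
    uint32_t even = __UXTB16(in);
    uint32_t odd  = __UXTB16(__ROR(in, 8));
*/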

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUAD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuad %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUADX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smuadx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLAD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlad %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLADX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smladx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
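
/*
  __SMUAD multiplies the two signed 16-bit lanes of its operands and adds the
  products; __SMLAD also adds a 32-bit accumulator, and the ...X forms swap the
  halfwords of the second operand first. A minimal packed Q15 dot product
  (illustrative; assumes word-aligned data and an even number of samples):

    static inline int32_t dot_q15(const uint32_t *x, const uint32_t *y, uint32_t pairs)
    {
      uint32_t acc = 0U;
      for (uint32_t i = 0U; i < pairs; i++)
      {
        acc = __SMLAD(x[i], y[i], acc);   // acc += x_lo*y_lo + x_hi*y_hi
      }
      return (int32_t)acc;
    }
*/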

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlald %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLALDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlaldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}
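
/*
  __SMLALD/__SMLALDX perform the same dual 16-bit multiplies as __SMLAD but
  accumulate into a 64-bit value, so long sums cannot overflow a 32-bit
  accumulator. Illustrative, reusing the x/y/pairs naming from the sketch
  above:

    uint64_t acc64 = 0U;
    for (uint32_t i = 0U; i < pairs; i++)
    {
      acc64 = __SMLALD(x[i], y[i], acc64);   // 64-bit accumulation of both products
    }
*/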

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSD  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMUSDX (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("smusdx %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSD (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsd %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SMLSDX (uint32_t op1, uint32_t op2, uint32_t op3)
{
  uint32_t result;

  __ASM volatile ("smlsdx %0, %1, %2, %3" : "=r" (result) : "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
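
/*
  __SMUSD subtracts the product of the upper halfwords from the product of the
  lower halfwords, which pairs naturally with __SMUADX for a packed complex
  multiply (illustrative; real parts assumed in the lower halfwords, imaginary
  parts in the upper halfwords of 'a' and 'b'):

    int32_t re = (int32_t)__SMUSD (a, b);   // re_a*re_b - im_a*im_b
    int32_t im = (int32_t)__SMUADX(a, b);   // re_a*im_b + im_a*re_b
*/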

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLD (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsld %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint64_t __SMLSLDX (uint32_t op1, uint32_t op2, uint64_t acc)
{
  union llreg_u{
    uint32_t w32[2];
    uint64_t w64;
  } llr;
  llr.w64 = acc;

#ifndef __ARMEB__   /* Little endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[0]), "=r" (llr.w32[1]): "r" (op1), "r" (op2) , "0" (llr.w32[0]), "1" (llr.w32[1]) );
#else               /* Big endian */
  __ASM volatile ("smlsldx %0, %1, %2, %3" : "=r" (llr.w32[1]), "=r" (llr.w32[0]): "r" (op1), "r" (op2) , "0" (llr.w32[1]), "1" (llr.w32[0]) );
#endif

  return(llr.w64);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE uint32_t __SEL  (uint32_t op1, uint32_t op2)
{
  uint32_t result;

  __ASM volatile ("sel %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
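
/*
  __SEL picks each result byte from op1 or op2 according to the APSR.GE flags,
  which the SIMD add/subtract intrinsics set per lane. A common idiom is a
  byte-wise maximum (illustrative; assumes __USUB8 from the earlier part of
  this header):

    (void)__USUB8(a, b);          // sets GE[n] where byte n of a >= byte n of b
    uint32_t max4 = __SEL(a, b);  // byte-wise maximum of the two packed operands
*/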

__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QADD( int32_t op1, int32_t op2)
{
  int32_t result;

  __ASM volatile ("qadd %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}

__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __QSUB( int32_t op1, int32_t op2)
{
  int32_t result;

  __ASM volatile ("qsub %0, %1, %2" : "=r" (result) : "r" (op1), "r" (op2) );
  return(result);
}
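
/*
  __QADD/__QSUB saturate the 32-bit result to [INT32_MIN, INT32_MAX] instead of
  wrapping, and set the Q flag when saturation occurs. Illustrative:

    int32_t s = __QADD(0x7FFFFFF0, 0x100);   // saturates to 0x7FFFFFFF
*/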

#define __PKHBT(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  __ASM ("pkhbt %0, %1, %2, lsl %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })

#define __PKHTB(ARG1,ARG2,ARG3) \
({                          \
  uint32_t __RES, __ARG1 = (ARG1), __ARG2 = (ARG2); \
  if (ARG3 == 0) \
    __ASM ("pkhtb %0, %1, %2" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2) ); \
  else \
    __ASM ("pkhtb %0, %1, %2, asr %3" : "=r" (__RES) : "r" (__ARG1), "r" (__ARG2), "I" (ARG3) ); \
  __RES; \
 })
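
/*
  __PKHBT packs the bottom halfword of ARG1 with the top halfword of
  (ARG2 << ARG3); __PKHTB packs the top halfword of ARG1 with the bottom
  halfword of (ARG2 asr ARG3). The shift amount must be a compile-time
  constant. Illustrative:

    uint32_t lo = 0x00001234, hi = 0x00005678;
    uint32_t packed = __PKHBT(lo, hi, 16);   // result: 0x56781234
*/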

__attribute__( ( always_inline ) ) __STATIC_INLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3)
{
  int32_t result;

  __ASM volatile ("smmla %0, %1, %2, %3" : "=r" (result): "r" (op1), "r" (op2), "r" (op3) );
  return(result);
}
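
/*
  __SMMLA returns op3 plus the most significant 32 bits of the 64-bit product
  op1*op2, i.e. a multiply-accumulate with an implicit arithmetic shift right
  by 32. Illustrative (the 'coef'/'sample' names are placeholders):

    acc = __SMMLA(coef, sample, acc);   // acc += (int32_t)(((int64_t)coef * (int64_t)sample) >> 32)
*/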

#endif /* (__CORTEX_M >= 0x04) */
/*@} end of group CMSIS_SIMD_intrinsics */


#if defined ( __GNUC__ )
#pragma GCC diagnostic pop
#endif

#endif /* __CMSIS_GCC_H */