RTEMS interrupt handling (rtems 中断处理)


./kernel2/rtems/cpukit/score/cpu/arm/arm_exc_interrupt.S 
[root@centos7 rtems]# ls ./kernel2/rtems/cpukit/score/cpu/arm/arm_exc_interrupt.S 
./kernel2/rtems/cpukit/score/cpu/arm/arm_exc_interrupt.S
[root@centos7 rtems]# ls ./kernel2/rtems/cpukit/
acinclude.m4  doxy-filter  doxygen.h  ftpd     libblock  libcsupport  libdl      libfs    libi2c  libmisc  librtemscxx    libtest   mghttpd  rtems  score    vc-key.sh
dev           doxygen      dtc        include  libcrypt  libdebugger  libdrvmgr  libgnat  libmd   libpci   libstdthreads  libtrace  posix    sapi   telnetd  zlib
[root@centos7 rtems]# ls ./kernel2/rtems/
bsps   c           cpukit    gccdeps.py   INSTALL  LICENSE.BSD-2-Clause  LICENSE.CC-BY-SA-4.0  LICENSE.JFFS2  LICENSE.RPCXDR  MAINTAINERS  Makefile.maint  rtems-bsps     spec        waf      yaml
build  config.ini  Doxyfile  gccdeps.pyc  LICENSE  LICENSE.BSD-3-Clause  LICENSE.GPL-2.0       LICENSE.NET    long_gcc.py     make         README          rtemslogo.png  testsuites  wscript
[root@centos7 rtems]# 
[root@centos7 rtems]# cat  ./kernel2/rtems/cpukit/score/cpu/arm/arm_exc_interrupt.S 
/**
 * @file
 *
 * @ingroup RTEMSScoreCPUARM
 *
 * @brief ARM interrupt exception prologue and epilogue.
 */

/*
 * Copyright (c) 2009, 2016 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

/*
 * The upper EXCHANGE_SIZE bytes of the INT stack area are used for data
 * exchange between INT and SVC mode.  Below of this is the actual INT stack.
 * The exchange area is only accessed if INT is disabled.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

/*
 * NOTE(review): the include target was lost in this transcript (the
 * angle-bracket text appears to have been stripped).  The upstream RTEMS
 * file includes <rtems/asm.h>, which provides the GET_SELF_CPU_CONTROL,
 * SWITCH_FROM_ARM_TO_THUMB_2 and BLX_TO_THUMB_1 macros used below.
 */
#include <rtems/asm.h>

#ifdef ARM_MULTILIB_ARCH_V4

/*
 * Registers used to hand values over from INT (IRQ) mode to SVC mode via
 * the exchange area at the top of the INT stack (see file header comment).
 */
#define EXCHANGE_LR r4
#define EXCHANGE_SPSR r5
#define EXCHANGE_CPSR r6
#define EXCHANGE_INT_SP r8

/* Four 32-bit registers -> 16 bytes of exchange area below the INT sp */
#define EXCHANGE_LIST {EXCHANGE_LR, EXCHANGE_SPSR, EXCHANGE_CPSR, EXCHANGE_INT_SP}
#define EXCHANGE_SIZE 16

/* Callee-saved registers reused as long-lived scratch across C calls */
#define SELF_CPU_CONTROL r7
#define NON_VOLATILE_SCRATCH r9

/*
 * Volatile context saved on the SVC stack; note that r4/r5 hold the saved
 * return address and SPSR (not the thread's r4/r5) at the time this list is
 * stored.  Eight 32-bit registers -> 32 bytes.
 */
#define CONTEXT_LIST {r0, r1, r2, r3, EXCHANGE_LR, EXCHANGE_SPSR, SELF_CPU_CONTROL, r12}
#define CONTEXT_SIZE 32

.arm
.globl _ARMV4_Exception_interrupt
_ARMV4_Exception_interrupt:

        /*
         * NOTE(review): entered via the IRQ vector, so we run in INT (IRQ)
         * mode with IRQs masked; lr holds the interrupted instruction + 4
         * and SPSR the interrupted context's CPSR.
         */

        /*
         * Save exchange registers to exchange area.  No writeback: sp is
         * unchanged, the four words land just below the INT stack pointer.
         */
        stmdb   sp, EXCHANGE_LIST

        /* Set exchange registers */
        mov     EXCHANGE_LR, lr
        mrs     EXCHANGE_SPSR, SPSR
        mrs     EXCHANGE_CPSR, CPSR
        sub     EXCHANGE_INT_SP, sp, #EXCHANGE_SIZE

        /* Switch to SVC mode (IRQ mode 0x12 | 1 == SVC mode 0x13) */
        orr     EXCHANGE_CPSR, EXCHANGE_CPSR, #0x1
        msr     CPSR_c, EXCHANGE_CPSR

        /*
         * Save context.  We save the link register separately because it has
         * to be restored in SVC mode.  The other registers can be restored in
         * INT mode.  Ensure that stack remains 8 byte aligned.  Use register
         * necessary for the stack alignment for the stack pointer of the
         * interrupted context.
         */
        stmdb   sp!, CONTEXT_LIST
        stmdb   sp!, {NON_VOLATILE_SCRATCH, lr}

#ifdef ARM_MULTILIB_VFP
        /* Save VFP context (FPSCR and the volatile double registers) */
        vmrs    r0, FPSCR
        vstmdb  sp!, {d0-d7}
#ifdef ARM_MULTILIB_VFP_D32
        vstmdb  sp!, {d16-d31}
#endif
        /* r1's value is unused on restore; it only keeps sp 8-byte aligned */
        stmdb   sp!, {r0, r1}
#endif /* ARM_MULTILIB_VFP */

        /* Get per-CPU control of current processor */
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL

        /* Remember INT stack pointer */
        mov     r1, EXCHANGE_INT_SP

        /*
         * Restore exchange registers from exchange area.  r4-r6 and r8 hold
         * the interrupted context's original values again; r1 still points
         * at the bottom of the exchange area (the effective INT sp).
         */
        ldmia   r1, EXCHANGE_LIST

        /* Get interrupt nest level */
        ldr     r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

        /*
         * Switch stack if necessary and save original stack pointer.  Only
         * the outermost interrupt (nest level 0) moves onto the INT stack.
         */
        mov     NON_VOLATILE_SCRATCH, sp
        cmp     r2, #0
        moveq   sp, r1

        /* Switch to Thumb-2 instructions if necessary */
        SWITCH_FROM_ARM_TO_THUMB_2      r1

        /* Increment interrupt nest and thread dispatch disable level */
        ldr     r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        add     r2, #1
        add     r3, #1
        str     r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]
        str     r3, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        /* Call BSP dependent interrupt dispatcher */
#ifdef RTEMS_PROFILING
        /* Only the outermost interrupt (nest level now 1) is profiled */
        cmp     r2, #1
        bne     .Lskip_profiling
        /* Entry timestamp; stash it in SELF_CPU_CONTROL across the call */
        BLX_TO_THUMB_1  _CPU_Counter_read
        mov     SELF_CPU_CONTROL, r0
        BLX_TO_THUMB_1  bsp_interrupt_dispatch
        /* Exit timestamp */
        BLX_TO_THUMB_1  _CPU_Counter_read
        mov     r2, r0
        mov     r1, SELF_CPU_CONTROL
        /* Re-derive the per-CPU control and report (cpu, entry, exit) */
        GET_SELF_CPU_CONTROL    r0
        mov     SELF_CPU_CONTROL, r0
        BLX_TO_THUMB_1  _Profiling_Outer_most_interrupt_entry_and_exit
.Lprofiling_done:
#else
        BLX_TO_THUMB_1  bsp_interrupt_dispatch
#endif

        /* Load some per-CPU variables */
        ldr     r0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        ldrb    r1, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
        ldr     r2, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
        ldr     r3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

        /* Restore stack pointer */
        mov     sp, NON_VOLATILE_SCRATCH

        /* Save CPSR in non-volatile register */
        mrs     NON_VOLATILE_SCRATCH, CPSR

        /*
         * Decrement levels and determine thread dispatch state.  After this
         * sequence r1 == 0 exactly when a dispatch is needed and allowed:
         * dispatch needed was 1, the dispatch disable level was 1 (it
         * becomes 0 below), and ISR dispatch disable was 0.
         */
        eor     r1, r0
        sub     r0, #1
        orr     r1, r0
        orr     r1, r2
        sub     r3, #1

        /* Store thread dispatch disable and ISR nest levels */
        str     r0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]
        str     r3, [SELF_CPU_CONTROL, #PER_CPU_ISR_NEST_LEVEL]

        /*
         * Check thread dispatch necessary, ISR dispatch disable and thread
         * dispatch disable level.
         */
        cmp     r1, #0
        bne     .Lthread_dispatch_done

        /*
         * Thread dispatch.  NOTE(review): CPSR was already captured a few
         * instructions above; this re-read looks redundant apart from
         * refreshing the condition flags clobbered by the eor/sub/orr
         * sequence — confirm against upstream.
         */
        mrs     NON_VOLATILE_SCRATCH, CPSR

.Ldo_thread_dispatch:

        /* Set ISR dispatch disable and thread dispatch disable level to one */
        mov     r0, #1
        str     r0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]
        str     r0, [SELF_CPU_CONTROL, #PER_CPU_THREAD_DISPATCH_DISABLE_LEVEL]

        /* Call _Thread_Do_dispatch(), this function will enable interrupts */
        mov     r0, SELF_CPU_CONTROL
        /* Second argument: the saved CPSR with the IRQ mask bit (0x80) cleared */
        mov     r1, NON_VOLATILE_SCRATCH
        mov     r2, #0x80
        bic     r1, r2
        BLX_TO_THUMB_1  _Thread_Do_dispatch

        /* Disable interrupts (restore the saved CPSR, whose I bit is set) */
        msr     CPSR, NON_VOLATILE_SCRATCH

#ifdef RTEMS_SMP
        /*
         * NOTE(review): re-fetch the per-CPU control, presumably because the
         * executing context may have changed processor during the dispatch —
         * confirm.
         */
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL
#endif

        /* Check if we have to do the thread dispatch again */
        ldrb    r0, [SELF_CPU_CONTROL, #PER_CPU_DISPATCH_NEEDED]
        cmp     r0, #0
        bne     .Ldo_thread_dispatch

        /* We are done with thread dispatching */
        mov     r0, #0
        str     r0, [SELF_CPU_CONTROL, #PER_CPU_ISR_DISPATCH_DISABLE]

.Lthread_dispatch_done:

        /* Switch to ARM instructions if necessary */
        SWITCH_FROM_THUMB_2_TO_ARM

#ifdef ARM_MULTILIB_VFP
        /* Restore VFP context (r1 was alignment padding, its value is ignored) */
        ldmia   sp!, {r0, r1}
#ifdef ARM_MULTILIB_VFP_D32
        vldmia  sp!, {d16-d31}
#endif
        vldmia  sp!, {d0-d7}
        vmsr    FPSCR, r0
#endif /* ARM_MULTILIB_VFP */

        /* Restore NON_VOLATILE_SCRATCH register and link register */
        ldmia   sp!, {NON_VOLATILE_SCRATCH, lr}

        /*
         * XXX: Remember and restore stack pointer.  The data on the stack is
         * still in use.  So the stack is now in an inconsistent state.  The
         * FIQ handler implementation must not use this area.
         */
        mov     r0, sp
        add     sp, #CONTEXT_SIZE

        /* Get INT mode program status register (SVC 0x13 & ~1 == IRQ 0x12) */
        mrs     r1, CPSR
        bic     r1, r1, #0x1

        /* Switch to INT mode */
        msr     CPSR_c, r1

        /*
         * Save EXCHANGE_LR and EXCHANGE_SPSR registers to exchange area.
         * At this point r4/r5 hold the interrupted context's own values;
         * park them on the INT stack because the ldmia below overwrites
         * r4/r5 with the saved return address and SPSR.
         */
        stmdb   sp!, {EXCHANGE_LR, EXCHANGE_SPSR}

        /* Restore context (loads saved lr/SPSR into r4/r5 via CONTEXT_LIST) */
        ldmia   r0, CONTEXT_LIST

        /* Set return address and program status */
        mov     lr, EXCHANGE_LR
        msr     SPSR_fsxc, EXCHANGE_SPSR

        /* Restore EXCHANGE_LR and EXCHANGE_SPSR registers from exchange area */
        ldmia   sp!, {EXCHANGE_LR, EXCHANGE_SPSR}

#ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
        /*
         * We must clear reservations here, since otherwise compare-and-swap
         * atomic operations with interrupts enabled may yield wrong results.
         * A compare-and-swap atomic operation is generated by the compiler
         * like this:
         *
         *   .L1:
         *     ldrex r1, [r0]
         *     cmp   r1, r3
         *     bne   .L2
         *     strex r3, r2, [r0]
         *     cmp   r3, #0
         *     bne   .L1
         *   .L2:
         *
         * Consider the following scenario.  A thread is interrupted right
         * before the strex.  The interrupt updates the value using a
         * compare-and-swap sequence.  Everything is fine up to this point.
         * The interrupt performs now a compare-and-swap sequence which fails
         * with a branch to .L2.  The current processor has now a reservation.
         * The interrupt returns without further strex.  The thread updates the
         * value using the unrelated reservation of the interrupt.
         */
        clrex
#endif

        /* Return from interrupt (lr holds interrupted instruction + 4) */
        subs    pc, lr, #4

#ifdef RTEMS_PROFILING
#ifdef __thumb2__
.thumb
#else
.arm
#endif
.Lskip_profiling:
        /* Nested interrupt: dispatch without profiling bookkeeping */
        BLX_TO_THUMB_1  bsp_interrupt_dispatch
        b       .Lprofiling_done
#endif

#endif /* ARM_MULTILIB_ARCH_V4 */
[root@centos7 rtems-libbsd-a64]#  arm-rtems6-gdb  build/arm-rtems6-xilinx_zynq_a9_qemu-default/media01.exe
GNU gdb (GDB) 10.1.90.20210409-git
Copyright (C) 2021 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Type "show copying" and "show warranty" for details.
This GDB was configured as "--host=aarch64-linux-gnu --target=arm-rtems6".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<https://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
    <http://www.gnu.org/software/gdb/documentation/>.

For help, type "help".
Type "apropos word" to search for commands related to "word"...
Reading symbols from build/arm-rtems6-xilinx_zynq_a9_qemu-default/media01.exe...
(gdb) b arm_exc_interrupt
Function "arm_exc_interrupt" not defined.
Make breakpoint pending on future shared library load? (y or [n]) n
(gdb) b _ARMV4_Exception_interrupt
Breakpoint 1 at 0x1faf1c: file ../../../cpukit/score/cpu/arm/arm_exc_interrupt.S, line 56.
(gdb) c
The program is not being run.
(gdb) target remote:1234
Remote debugging using :1234
bsp_start_vector_table_end () at ../../../bsps/arm/shared/start/start.S:192
192     ../../../bsps/arm/shared/start/start.S: No such file or directory.
(gdb) c
Continuing.

Breakpoint 1, _ARMV4_Exception_interrupt () at ../../../cpukit/score/cpu/arm/arm_exc_interrupt.S:56
56      ../../../cpukit/score/cpu/arm/arm_exc_interrupt.S: No such file or directory.
(gdb) bt
#0  _ARMV4_Exception_interrupt () at ../../../cpukit/score/cpu/arm/arm_exc_interrupt.S:56
#1  0x001fb18c in ?? ()
Backtrace stopped: previous frame identical to this frame (corrupt stack?)