forked from Lephenixnoir/gint
Add thread support (SH4)
* thread context switching (supports ETMU and TMU) * thread interface (not optimal) * thread scheduler (basic, non-preemptive) * thread signals interface (not implemented yet but linked to the scheduler) * thread driver (not complete yet)
This commit is contained in:
parent
ad6c108dfc
commit
f02ca55429
13
TODO
13
TODO
|
@ -12,6 +12,19 @@ Extensions on existing code:
|
|||
* core: run destructors when a task-switch results in leaving the app
|
||||
* core rtc: use qdiv10 to massively improve division performance
|
||||
* topti: let the font specify letter and word spacing
|
||||
* thread:
|
||||
- provide better interface for thread creation:
|
||||
- created thread is unable to be in the zombie state
|
||||
- created thread is linked to another thread (like a parent)
|
||||
- provide interface for the idle thread
|
||||
- allow menu-return
|
||||
- allow custom idle handler
|
||||
- provide better interface for the "main" thread:
|
||||
- allow a custom jmp_buf, which will overwrite the dead thread's
|
||||
context with the saved jmpbuf context and perform a
|
||||
longjmp() with it.
|
||||
- pseudo-preemption using pthread_yield() design(?)
|
||||
- move the keyboard driver into a thread(?)
|
||||
|
||||
Future directions.
|
||||
* A complete file system abstraction
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
#ifndef GINT_STD_SETJMP
# define GINT_STD_SETJMP

#include <stddef.h>
#include <stdint.h>

/* __jmp_buf: saved execution environment for setjmp() / longjmp().

   @note:
   Only r8 ~ r15 and the SR / PC-related registers are saved. The SR register
   is placed first (offset 0) because longjmp() may execute with a different
   register bank selected; restoring the saved SR first forces the correct
   bank before the general registers are reloaded, which is simpler than
   handling the bank switch afterwards (see <src/setjmp/longjmp.S>). */
struct __jmp_buf
{
	uint32_t sr;		/* status register; restored first by longjmp() */
	uint32_t reg[8];	/* callee-saved general registers r8 ~ r15 */
	uint32_t gbr;
	uint32_t macl;
	uint32_t mach;
	uint32_t pr;		/* return address of the setjmp() call site */
};

/* User jmp_buf alias (array-of-one so it decays to a pointer when passed,
   as in the standard <setjmp.h> interface) */
typedef struct __jmp_buf jmp_buf[1];



/* setjmp(): store the calling environment in ENV

   This function saves information about the calling environment (the stack
   pointer, the return address, the callee-saved registers and the status
   register) in the buffer ENV for later use by longjmp().
   When called directly, setjmp() returns 0. */
extern int setjmp(jmp_buf env);

/* longjmp(): effectuate a non-local goto using the saved ENV

   This function uses the information saved in ENV to transfer control back
   to the point where setjmp() was called and to restore ("rewind") the stack
   to its state at the time of the setjmp() call.

   Following a successful longjmp(), execution continues as if setjmp() had
   returned a second time. This "fake" return can be distinguished from a
   true setjmp() call because the "fake" return returns the value provided in
   VAL. If the programmer mistakenly passes the value 0 in VAL, the "fake"
   return will instead return 1. */
extern void longjmp(jmp_buf env, int val);


#endif /* GINT_STD_SETJMP */
|
|
@ -0,0 +1,143 @@
|
|||
#ifndef GINT_THREAD
# define GINT_THREAD

#include <stddef.h>
#include <stdint.h>
#include <stdarg.h>

/* Default context-switching frequency (scheduler ticks per second) */
#ifndef THREAD_SCHEDULER_FREQUENCY
# define THREAD_SCHEDULER_FREQUENCY 16
#endif

/* Default thread stack size, in bytes */
#ifndef THREAD_STACK_SIZE
# define THREAD_STACK_SIZE (4 * 1024)
#endif

/* Default kernel idle thread's stack size
   NOTE(review): this value is used as the element count of a uint32_t array
   in <thread/thread.c>, so it is 32 *words* (128 bytes), not 32 bytes —
   confirm the intended unit. */
#ifndef THREAD_KERNEL_IDLE_STACK_SIZE
# define THREAD_KERNEL_IDLE_STACK_SIZE 32
#endif

/* thread_t: thread identifier */
typedef uint32_t thread_t;
|
||||
|
||||
/* cpu_ctx: whole SH3-based CPU context definition

   Field order matters: the scheduler's assembly code saves and restores this
   structure by walking it from offset 0 upward (R0_BANK..R7_BANK, r8..r15
   into reg[16], then gbr, macl, mach, ssr, spc, pr — 22 words, 88 bytes).
   Keep it in sync with <thread/kernel.S>. */
struct cpu_ctx
{
	uint32_t reg[16];	/* r0 ~ r15 at the time of the interrupt */
	uint32_t gbr;
	uint32_t macl;
	uint32_t mach;
	uint32_t ssr;		/* saved SR, reloaded by rte */
	uint32_t spc;		/* saved PC (resume address), reloaded by rte */
	uint32_t pr;
};
|
||||
|
||||
/* struct thread: Thread structure definition */
|
||||
//TODO: all hardware context !
|
||||
//TODO: signals context !
|
||||
//TODO: signals mask !
|
||||
struct thread {
|
||||
/* hardware context */
|
||||
struct {
|
||||
struct cpu_ctx cpu;
|
||||
} context;
|
||||
|
||||
/* thread status */
|
||||
enum {
|
||||
THREAD_STATUS_STOPPED, /* thread initialized but not stated */
|
||||
THREAD_STATUS_RUNNING, /* thread started */
|
||||
THREAD_STATUD_PAUSED, /* thread paused (not implemented) */
|
||||
THREAD_STATUS_ZOMBIE, /* thread terminated but not removed */
|
||||
} status;
|
||||
|
||||
/* private information (used by the scheduler) */
|
||||
struct {
|
||||
uintptr_t stack; /* original stack address */
|
||||
thread_t id; /* thread ID */
|
||||
void *ret; /* saved exited value */
|
||||
struct thread *next; /* potential next process */
|
||||
} private;
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
//---
|
||||
// User thread interface
|
||||
//---
|
||||
|
||||
/* thread_create(): Create a new thread */
|
||||
extern int thread_create(thread_t *thread, int nb_arg, void *function, ...);
|
||||
|
||||
/* thread_join(): Waits for the thread specified by thread to terminate */
|
||||
extern int thread_join(thread_t thread, void **retvel);
|
||||
|
||||
/* thread_exit(): Terminate the calling thread */
|
||||
extern void thread_exit(void *retval);
|
||||
|
||||
|
||||
|
||||
//---
// Thread atomic operation
//---
/* thread_atomic_start(): start an atomic operation

   This function blocks interrupts and exceptions until
   thread_atomic_stop() is called. This is really useful when you need to
   secure some tricky part of code (like kernel-level driver implementation).

   But be careful: the code executed after this function SHOULD be
   EXCEPTION-SAFE! Otherwise a crash will occur and gint can do nothing to
   avoid it, because this is hardware-specific. If you need to secure shared
   data, use a mutex instead.

   This implementation is recursion-safe and returns:
   * the SR value when you enter the "atomic" operation (first call)
   * a marker value if an "atomic" operation is already active (nested call)
     NOTE(review): this comment historically promised 0 for the nested case,
     but the implementation (<thread/atomic.S>) returns -1 — reconcile.
   To return to "normal" operation, call thread_atomic_stop() as many times
   as thread_atomic_start() was called. */
extern uint32_t thread_atomic_start(void);

/* thread_atomic_stop(): stop an atomic operation

   This function tries to return to the "normal" mode and returns:
   * a negative value   if an error occurred (no atomic operation active)
   * 0                  if you are still inside a nested "atomic" operation
   * the restored SR    if you returned to the "classic" mode */
extern uint32_t thread_atomic_stop(void);
|
||||
|
||||
|
||||
|
||||
|
||||
//---
// Thread signals management
//---
/* thread_signals_pending_deliver(): deliver all pending signals

   This function is KERNEL-ONLY and SHOULD NEVER be called by user code: it
   is exclusively reserved for the internal thread scheduler (see
   <gint/thread/scheduler.c> for more information). Returns 0 when THREAD has
   no pending signal preventing it from running. */
extern int thread_signals_pending_deliver(struct thread *thread);
|
||||
|
||||
|
||||
|
||||
|
||||
//---
// Kernel thread interface
//---
/* thread_kernel_idle_code(): idle code used by the idle thread

   The idle thread runs when none of the registered threads is able to run.
   This dummy (for now) thread just spins until the next scheduler tick. */
extern void thread_kernel_idle_code(void);

/* thread_kernel_terminate_trampoline(): termination trampoline code

   This function invokes thread_exit(). It is executed automatically when a
   thread returns from its main procedure and SHOULD NOT be called
   directly. */
extern void thread_kernel_terminate_trampoline(void);
#endif /* GINT_THREAD */
|
|
@ -236,4 +236,51 @@ void timer_reload(int timer, uint32_t delay);
|
|||
bytes) and increments it. */
|
||||
int timer_timeout(volatile void *arg);
|
||||
|
||||
|
||||
|
||||
|
||||
//---
// Debugging interface
//
// @note
// This interface is used internally by the thread scheduler to pre-calculate
// the timer information needed to switch contexts quickly. This is a draft
// interface that can be greatly improved.
//---
/* timer_debug_info: hardware information that can be obtained through the
   timer debugging interface */
struct timer_debug_info
{
	/* context-related information */
	struct {
		/* addresses of the timer's hardware registers */
		struct {
			void *tstr;
			void *tcor;
			void *tcnt;
			void *tcr;
		} address;
		/* current register values */
		struct {
			uint32_t tcor;
			uint32_t tcnt;
			uint16_t tcr;
		} context;
	} hardware;

	/* interrupt-related information */
	struct {
		struct {
			/* TCR masks: underflow flag / underflow intr enable */
			struct {
				uint16_t unf;
				uint16_t unie;
			} tcr;
			/* TSTR start-bit mask */
			struct {
				uint8_t str;
			} tstr;
		} mask;
		int id;	/* interrupt event code — presumably the INTEVT
			   value of the timer; TODO confirm */
	} interrupt;
};
/* timer_debug_get_hw_info(): get timer hardware information */
extern int timer_debug_get_hw_info(int id, struct timer_debug_info *info);

#endif /* GINT_TIMER */
|
||||
|
|
|
@ -62,18 +62,38 @@ _gint_inth_7305:
|
|||
1: .long 0xff000028
|
||||
#endif
|
||||
|
||||
/* SH7305-TYPE INTERRUPT HANDLER ENTRY - 40 BYTES */
|
||||
/* SH7305-TYPE INTERRUPT HANDLER ENTRY - 54 BYTES (2 blocks in total) */
|
||||
|
||||
_gint_inth_7305:
|
||||
/* First, we need to check the thread scheduler.
|
||||
Due to the interrupt handler architecture, we cannot store this part
|
||||
here. So, we need to jump into the scheduler's checking procedure,
|
||||
but without modify the current user context.
|
||||
|
||||
@note
|
||||
PR will be updated when "jsr" instruction is executed, R15 will be
|
||||
lost if we push something into it ; So, let's send them using
|
||||
non-conventional method. (see <gint/thread/scheduler.S> for more
|
||||
information)
|
||||
|
||||
On SH4 processor, a special register named SGR save the value of R15
|
||||
when interrupts / exceptions occur, but the SH3 doesn't have it, this
|
||||
is why we save the stack value before using it. */
|
||||
mov r15, r1
|
||||
sts pr, r0
|
||||
mov.l r0, @-r15
|
||||
mov.l 1f, r2
|
||||
jsr @r2
|
||||
nop
|
||||
|
||||
/* Save caller-saved registers which might currently be in use by the
|
||||
interrupted function, as we don't know what the callback will do */
|
||||
sts.l pr, @-r15
|
||||
stc.l gbr, @-r15
|
||||
sts.l mach, @-r15
|
||||
sts.l macl, @-r15
|
||||
|
||||
/* Get the event code from the INTEVT register */
|
||||
mov.l 1f, r0
|
||||
mov.l 2f, r0
|
||||
mov.l @r0, r0
|
||||
|
||||
/* Interrupt codes start at 0x400 */
|
||||
|
@ -96,8 +116,15 @@ _gint_inth_7305:
|
|||
rte
|
||||
nop
|
||||
|
||||
.zero 24
|
||||
1: .long 0xff000028
|
||||
/* force 4-alignement */
|
||||
.zero 2
|
||||
|
||||
/* information */
|
||||
1: .long _thread_kernel_sched_procedure
|
||||
2: .long 0xff000028
|
||||
|
||||
/* block padding */
|
||||
.zero 8
|
||||
.first_entry:
|
||||
|
||||
#ifdef FX9860G
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
#include <gint/gint.h>
|
||||
#include <gint/hardware.h>
|
||||
#include <gint/exc.h>
|
||||
#include <gint/thread.h>
|
||||
|
||||
#include "kernel.h"
|
||||
|
||||
|
@ -61,7 +62,7 @@ static void regcpy(uint32_t * restrict l, int32_t s, uint32_t * restrict r)
|
|||
s -= 16;
|
||||
}
|
||||
}
|
||||
#define regcpy(l, s, r) regcpy(l, (int32_t)s, r)
|
||||
#define regcpy(l, s, r) regcpy(l, (intptr_t)s, r)
|
||||
|
||||
/* regclr(): Clear a memory region using symbol information
|
||||
@r Source pointer (base address)
|
||||
|
@ -77,7 +78,7 @@ static void regclr(uint32_t *r, int32_t s)
|
|||
s -= 16;
|
||||
}
|
||||
}
|
||||
#define regclr(r, s) regclr(r, (int32_t)s)
|
||||
#define regclr(r, s) regclr(r, (intptr_t)s)
|
||||
|
||||
/* callarray(): Call an array of functions (constructors or destructors)
|
||||
@f First element of array
|
||||
|
@ -175,10 +176,18 @@ int start(int isappli, int optnum)
|
|||
|
||||
callarray(&bctors, &ectors);
|
||||
|
||||
/* "Pre-main" loop.
|
||||
We're trying to run the main function using a thread, if the thread
|
||||
creation fail, we will use the "classic" way to do the job. */
|
||||
int rc = 1;
|
||||
thread_t thread;
|
||||
while(1)
|
||||
{
|
||||
rc = main(isappli, optnum);
|
||||
if (thread_create(&thread, 2, &main, isappli, optnum) != 0) {
|
||||
rc = main(isappli, optnum);
|
||||
} else {
|
||||
thread_join(thread, (void**)&rc);
|
||||
}
|
||||
if(!gint_restart) break;
|
||||
gint_osmenu();
|
||||
}
|
||||
|
|
|
@ -0,0 +1,91 @@
|
|||
/*
|
||||
** gint:std:setjmp - performing a nonlocal goto
|
||||
*/
|
||||
|
||||
.section .gint.mapped, "ax"
|
||||
.align 4
|
||||
|
||||
|
||||
.global _setjmp
.type _setjmp, @function

/* setjmp(): save the calling environment into the jmp_buf pointed to by r4.
   Returns 0 when called directly; longjmp() later makes this call site
   "return" again with a non-zero value. */
_setjmp:
	/* Block interrupts / exceptions while saving (this part must be
	   exception-safe): set SR.BL, keeping the original SR in r3. */
	stc	sr, r1
	mov	r1, r3
	mov	#0x10, r2
	shll8	r2
	shll16	r2
	or	r2, r1
	ldc	r1, sr

	/* Save the current environment. Stores are done with pre-decrement
	   from the end of the 52-byte buffer, so the memory order must be
	   the REVERSE of struct __jmp_buf (<gint/std/setjmp.h>): the SR slot
	   must end up at offset 0, because longjmp() reloads SR first to
	   force the register-bank switch. (The previous code pushed SR
	   first, leaving it at the highest offset, so longjmp() would have
	   loaded the saved r8 into SR.) */
	add	#52, r4
	sts.l	pr, @-r4
	sts.l	mach, @-r4
	sts.l	macl, @-r4
	stc.l	gbr, @-r4
	mov.l	r15, @-r4
	mov.l	r14, @-r4
	mov.l	r13, @-r4
	mov.l	r12, @-r4
	mov.l	r11, @-r4
	mov.l	r10, @-r4
	mov.l	r9, @-r4
	mov.l	r8, @-r4
	mov.l	r3, @-r4	! previous SR status, at offset 0

	/* restore the caller's SR then return 0 (direct call) */
	ldc	r3, sr
	rts
	mov	#0, r0
|
||||
|
||||
|
||||
|
||||
|
||||
.global _longjmp
.type _longjmp, @function

/* longjmp(): restore the environment saved in the jmp_buf pointed to by r4
   and "return" VAL (r5) from the matching setjmp() call site. */
_longjmp:
	/* block interrupts / exceptions (set SR.BL) */
	stc	sr, r1
	mov	#0x10, r2
	shll8	r2
	shll16	r2
	or	r2, r1
	ldc	r1, sr

	/* Check VAL and move the arguments into r8/r9: these registers are
	   not banked, so they survive the register-bank switch that may
	   occur when the saved SR is reloaded below. A zero VAL is replaced
	   by 1, as the setjmp() contract requires. */
	tst	r5, r5
	mov	r4, r8
	bf/s	env_switch	! VAL != 0: keep it (copied in the delay slot)
	mov	r5, r9
	mov	#1, r9		! VAL == 0: return 1 instead

env_switch:
	/* Load the saved SR first to force the register-bank switch (if it
	   occurs), then move the env pointer and the return value into
	   registers that setjmp() did not save. */
	ldc.l	@r8+, sr
	mov	r8, r4
	mov	r9, r0

	/* restore all the saved registers (struct __jmp_buf order) */
	mov.l	@r4+, r8
	mov.l	@r4+, r9
	mov.l	@r4+, r10
	mov.l	@r4+, r11
	mov.l	@r4+, r12
	mov.l	@r4+, r13
	mov.l	@r4+, r14
	mov.l	@r4+, r15
	ldc.l	@r4+, gbr
	lds.l	@r4+, macl
	lds.l	@r4+, mach
	lds.l	@r4+, pr

	/* jump back to the setjmp() call site, returning r0 */
	rts
	nop
|
|
@ -0,0 +1,87 @@
|
|||
/*
** gint:thread:atomic - Thread atomic helpers
**
** thread_atomic_start() / thread_atomic_stop() implement a recursion-safe
** "atomic section": the outermost start() masks interrupts and exceptions
** (SR.BL + SR.IMASK) and the matching outermost stop() restores the SR that
** was saved on entry. A counter tracks the nesting depth.
*/

.section .gint.mapped, "ax"
.align 4

.global _thread_atomic_start
.type _thread_atomic_start, @function

/* thread_atomic_start(): mask interrupts and exceptions.

   Returns the previous SR value on the first (outermost) call, and -1 when
   an atomic section is already active.
   NOTE(review): <gint/thread.h> documents 0 for the nested case, but this
   code returns -1 — reconcile the two. */
_thread_atomic_start:
	/* Check whether an atomic section is already active and update the
	   nesting counter (tst sets T when the counter is 0). */
	mov.l	thread_atomic_counter, r1
	mov.l	@r1, r2
	tst	r2, r2
	add	#1, r2
	mov.l	r2, @r1
	bf/s	atomic_start_exit	! nested call: return -1
	mov	#-1, r0

	/* Mask interrupts / exceptions using the IMASK and BL fields of SR */
	stc	sr, r1
	mov	r1, r0
	mov.l	sr_mask, r2
	or	r2, r1
	ldc	r1, sr

	/* Save the "old" SR so the outermost stop() can restore it. */
	mov.l	thread_atomic_sr_save, r1
	mov.l	r0, @r1

atomic_start_exit:
	rts
	nop


.global _thread_atomic_stop
.type _thread_atomic_stop, @function

/* thread_atomic_stop(): unmask (if possible) interrupt / exception signals.

   Returns -1 when no atomic section is active, 0 when the section is still
   nested, or the restored SR value when normal mode is re-entered. */
_thread_atomic_stop:
	/* Check that an atomic section is active, then decrement the
	   counter; the saved SR is restored only when it drops to zero. */
	mov.l	thread_atomic_counter, r1
	mov.l	@r1, r0
	tst	r0, r0
	bt	atomic_end_error	! counter == 0: stop() without start()
	cmp/eq	#1, r0			! T = leaving the outermost section
	add	#-1, r0
	mov.l	r0, @r1
	bf/s	atomic_end_exit		! still nested: return 0
	mov	#0, r0

	/* Outermost stop(): restore the SR value saved by start(). */
	mov.l	thread_atomic_sr_save, r1
	mov.l	@r1, r0
	ldc	r0, sr
	bra	atomic_end_exit
	nop

atomic_end_error:
	mov	#-1, r0

atomic_end_exit:
	rts
	nop

.align 4
/* pointers to the state variables defined below */
thread_atomic_counter:	.long _thread_atomic_counter
thread_atomic_sr_save:	.long _thread_atomic_sr_save
/* SR.BL (bit 28) + SR.IMASK (bits 4..7) */
sr_mask:	.long 0x100000f0



/*
** Global symbols
*/
.global _thread_atomic_sr_save
.type _thread_atomic_sr_save, @object
.global _thread_atomic_counter
.type _thread_atomic_counter, @object

.align 4
_thread_atomic_sr_save:	.long 0x00000000
_thread_atomic_counter:	.long 0x00000000
|
|
@ -0,0 +1,259 @@
|
|||
/*
** gint:thread:kernel - Thread kernel interface
** This file contains the low-level "kernel" part of the thread module:
** - thread_kernel_sched_procedure()       Thread scheduler procedure
** - thread_kernel_terminate_trampoline()  Thread termination trampoline
** - thread_kernel_idle_code()             Idle thread's code
*/

.section .gint.mapped, "ax"
.align 4

.global _thread_kernel_sched_procedure
.type _thread_kernel_sched_procedure, @function

/* thread_kernel_sched_procedure(): Scheduler entry

   This "function" is invoked on ANY interrupt: this is the only way to hook
   the scheduler in without breaking gint's VBR software architecture. It
   returns immediately unless the interrupt comes from the scheduler's own
   timer. During the "driver" initialization (the thread module is a driver
   in gint's eyes) one timer is locked, and all hardware information about it
   is pre-calculated into the variables referenced by the literal pool below
   (see <thread/thread.c>).

   @note
   Because this is entered like a common subroutine from the interrupt
   handler, PR has been saved into R0 and the user R15 into R1 by the caller
   (see <src/kernel/inth.S>). We run with the "interrupt" register bank, so
   only r0~r7 may be used until the current thread's context has been saved.
   (SH4 provides SGR with the R15 snapshot, but we must stay SH3-compatible,
   hence the R1 convention.) */
_thread_kernel_sched_procedure:
	/* First, check whether this interrupt comes from the scheduler's
	   timer; if not, return to the regular interrupt handler. */
	mov.l	intevt_register, r2
	mov.l	thread_tmu_interrupt_id, r3
	mov.l	@r2, r2
	mov.l	@r3, r3
	cmp/eq	r2, r3
	bt	stop_timer
	rts
	nop

stop_timer:
	/* Stop the scheduler's timer to "avoid" counting the context
	   switching time.

	   @note:
	   The scheduler may use an ETMU instead of a classical TMU. ETMU has
	   slow write access but the same access size for TSTR, which is why
	   the slow write-and-verify loop is used unconditionally here.

	   Fixed: thread_tmu_tstr_mask / thread_tmu_tstr_addr are VARIABLES
	   holding the mask value and the register address; the previous code
	   read a byte at the variables' own addresses instead of
	   dereferencing them first (compare with the TCR handling in
	   scheduler_restart_timer below). */
	mov.l	thread_tmu_tstr_mask, r2
	mov.l	thread_tmu_tstr_addr, r3
	mov.l	@r2, r2			! load the TSTR mask value
	mov.l	@r3, r3			! load the TSTR register address
	mov.b	@r3, r4
	not	r2, r2
	and	r2, r4
tstr_slow_clear:
	mov.b	r4, @r3
	mov.b	@r3, r5
	cmp/eq	r5, r4
	bf	tstr_slow_clear

save_context:
	/* check if a thread is currently running */
	mov.l	thread_current_context, r2
	mov.l	@r2, r2
	tst	r2, r2
	bt/s	schedule_thread
	add	#88, r2		! delay slot: end of the 88-byte cpu_ctx
				! (harmless when branching: r2 is unused)

	/* save the current thread's context (struct cpu_ctx, highest offset
	   first since we store with pre-decrement) */
	mov.l	r0, @-r2	/* R0 holds the user PR snapshot */
	stc.l	spc, @-r2
	stc.l	ssr, @-r2
	sts.l	mach, @-r2
	sts.l	macl, @-r2
	stc.l	gbr, @-r2
	mov.l	r1, @-r2	/* R1 holds the user R15 snapshot */
	mov.l	r14, @-r2
	mov.l	r13, @-r2
	mov.l	r12, @-r2
	mov.l	r11, @-r2
	mov.l	r10, @-r2
	mov.l	r9, @-r2
	mov.l	r8, @-r2
	stc.l	R7_BANK, @-r2
	stc.l	R6_BANK, @-r2
	stc.l	R5_BANK, @-r2
	stc.l	R4_BANK, @-r2
	stc.l	R3_BANK, @-r2
	stc.l	R2_BANK, @-r2
	stc.l	R1_BANK, @-r2
	stc.l	R0_BANK, @-r2

schedule_thread:
	/* Call the high-level scheduler (thread_schedule()).

	   @note
	   R1/R0 (user stack / PR snapshots) are pushed first so that, if the
	   scheduler returns NULL, we can restore them and leave the
	   interrupted context untouched. */
	mov.l	r1, @-r15
	mov.l	r0, @-r15
	mov.l	thread_schedule, r0
	jsr	@r0
	nop
#ifdef THREAD_SCHEDULER_DEBUG
	mov.l	thread_schedule_debug, r1
	jsr	@r1
	mov	r0, r4
#endif
	tst	r0, r0
	lds.l	@r15+, pr
	bt/s	scheduler_restart_timer		! NULL: keep current context
	mov.l	@r15+, r15


context_restore:
	/* restore the new thread's context (struct cpu_ctx, from offset 0) */
	ldc.l	@r0+, R0_BANK
	ldc.l	@r0+, R1_BANK
	ldc.l	@r0+, R2_BANK
	ldc.l	@r0+, R3_BANK
	ldc.l	@r0+, R4_BANK
	ldc.l	@r0+, R5_BANK
	ldc.l	@r0+, R6_BANK
	ldc.l	@r0+, R7_BANK
	mov.l	@r0+, r8
	mov.l	@r0+, r9
	mov.l	@r0+, r10
	mov.l	@r0+, r11
	mov.l	@r0+, r12
	mov.l	@r0+, r13
	mov.l	@r0+, r14
	mov.l	@r0+, r15
	ldc.l	@r0+, gbr
	lds.l	@r0+, macl
	lds.l	@r0+, mach
	ldc.l	@r0+, ssr
	ldc.l	@r0+, spc
	lds.l	@r0+, pr

scheduler_restart_timer:
	/* Check whether the scheduler uses a TMU or an ETMU: gint timer IDs
	   below 3 are assumed to be TMU, 3 and above ETMU (T = ETMU). */
	mov.l	thread_scheduler_gint_timer_id, r0
	mov.l	@r0, r0
	mov	#3, r1
	cmp/ge	r1, r0

	/* Load the pre-calculated timer information (dereference the
	   variables to get the actual mask values / register addresses;
	   the missing TSTR dereference is fixed here too). */
	mov.l	thread_tmu_tcr_mask, r0
	mov.l	thread_tmu_tcr_addr, r1
	mov.l	@r1, r1
	mov.l	@r0, r0
	not	r0, r0
	mov.l	thread_tmu_tstr_mask, r2
	mov.l	thread_tmu_tstr_addr, r3
	mov.l	@r3, r3			! load the TSTR register address
	bf/s	tmu_restart_timer
	mov.l	@r2, r2

etmu_restart_timer:
	/* clear the interrupt flag (ETMU.TCR.UNF); ETMU writes are slow, so
	   write-and-verify until the value sticks */
	mov.b	@r1, r4
	and	r0, r4
tcr_slow_write:
	mov.b	r4, @r1
	mov.b	@r1, r0
	cmp/eq	r4, r0
	bf	tcr_slow_write

	/* restart the timer (ETMU.TSTR), same slow write-and-verify */
	mov.b	@r3, r4
	or	r2, r4
tstr_slow_write:
	mov.b	r4, @r3
	mov.b	@r3, r0
	cmp/eq	r4, r0
	bf	tstr_slow_write
	bra	process_switch
	nop

tmu_restart_timer:
	/* clear the interrupt flag (TMU.TCR.UNF; 16-bit register) */
	mov.w	@r1, r4
	and	r0, r4
	mov.w	r4, @r1

	/* restart the timer (TMU.TSTR) */
	mov.b	@r3, r4
	or	r2, r4
	mov.b	r4, @r3

process_switch:
	rte
	nop

.align 4
/* scheduler global information */
thread_current_context:	.long _thread_scheduler_current

/* high-level functions */
thread_schedule:	.long _thread_schedule
#ifdef THREAD_SCHEDULER_DEBUG
/* only referenced — and only defined — when the debug build is enabled */
thread_schedule_debug:	.long _thread_schedule_debug
#endif

/* timer hardware pre-calculated information (see <thread/thread.c>) */
thread_scheduler_gint_timer_id:	.long _thread_scheduler_gint_timer_id
thread_tmu_interrupt_id:	.long _thread_tmu_interrupt_id
thread_tmu_tcr_addr:	.long _thread_tmu_tcr_addr
thread_tmu_tcr_mask:	.long _thread_tmu_tcr_mask
thread_tmu_tstr_mask:	.long _thread_tmu_tstr_mask
thread_tmu_tstr_addr:	.long _thread_tmu_tstr_addr

/* other information */
intevt_register:	.long 0xff000028
|
||||
|
||||
|
||||
|
||||
|
||||
.global _thread_kernel_terminate_trampoline
.type _thread_kernel_terminate_trampoline, @function

/* thread_kernel_terminate_trampoline(): Thread termination trampoline

   This function SHOULD NEVER be called manually! It is installed as the
   return address (PR) of every created thread, so it runs when a thread
   returns from its main procedure. Its only purpose is to forward the
   returned value to the high-level thread_exit() function (see
   <gint/thread/thread.c> for more information).

   Fixed: the returned value is in r0 at this point; the previous code passed
   the constant 0 to thread_exit(), discarding it — contradicting this very
   trampoline's stated purpose of saving the return value. */
_thread_kernel_terminate_trampoline:
	mov.l	thread_exit, r1
	jmp	@r1
	mov	r0, r4		! delay slot: pass the thread's return value

.align 4
thread_exit:	.long _thread_exit
|
||||
|
||||
|
||||
|
||||
|
||||
.global _thread_kernel_idle_code
.type _thread_kernel_idle_code, @function

/* thread_kernel_idle_code(): Idle code for the idle thread

   This procedure does nothing and waits for any hardware signal. It is run
   when no registered thread can be executed; its role is to keep the
   scheduler alive until a delayed signal wakes an interrupted thread up.
   (The unreachable rts/nop that followed the loop has been removed.) */
_thread_kernel_idle_code:
	bra	_thread_kernel_idle_code
	nop
|
|
@ -0,0 +1,100 @@
|
|||
//---
|
||||
// gint:thread:scheduler - High-level scheduler part
|
||||
//---
|
||||
#include <gint/thread.h>
|
||||
#include <gint/display.h>
|
||||
|
||||
/* symbols defined in <gint/thread.thread.c> */
|
||||
extern struct thread *thread_scheduler_queue;
|
||||
extern struct thread *thread_scheduler_current;
|
||||
extern struct thread thread_kernel_idle;
|
||||
|
||||
/* thread_schedule(): Schedule thread queue and return the next thread
|
||||
|
||||
This function SHOULD NOT be involved manually, it should be be involved only
|
||||
by the scheduler "handler" (see <gint/thread/kernel.S>). */
|
||||
struct thread *thread_schedule(void)
|
||||
{
|
||||
struct thread *thread;
|
||||
|
||||
/* Check potential error. */
|
||||
if (thread_scheduler_queue == NULL)
|
||||
return (NULL);
|
||||
|
||||
/* if we have no currently running thread, return directly
|
||||
TODO: stop the scheduler timer until multiple thread is detected. */
|
||||
thread = thread_scheduler_queue;
|
||||
if (thread_scheduler_current == NULL) {
|
||||
thread_scheduler_current = thread;
|
||||
return (thread);
|
||||
}
|
||||
|
||||
/* Get the potential next thread context. */
|
||||
thread = thread->private.next;
|
||||
if (thread == NULL)
|
||||
thread = thread_scheduler_queue;
|
||||
|
||||
/* Check all scheduler thread in the queue by checking their status and
|
||||
pending signals. */
|
||||
while (thread != thread_scheduler_current) {
|
||||
/* Check if the potential next thread is valid
|
||||
|
||||
@note:
|
||||
A thread who has a pending signal which involve user signals
|
||||
handler is considered like a valid thread, this is why we
|
||||
deliver signals now. */
|
||||
if (thread->status == THREAD_STATUS_RUNNING
|
||||
&& thread_signals_pending_deliver(thread) == 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
/* Get the potential next thread */
|
||||
thread = thread->private.next;
|
||||
if (thread == NULL)
|
||||
thread = thread_scheduler_queue;
|
||||
}
|
||||
|
||||
/* Check if no thread has been found, check its signals and see if the
|
||||
context can be loaded again. */
|
||||
if (thread == thread_scheduler_current) {
|
||||
if (thread->status == THREAD_STATUS_RUNNING
|
||||
&& thread_signals_pending_deliver(thread) == 0) {
|
||||
return (thread);
|
||||
}
|
||||
|
||||
/* If no thread has been found, load idle kernel thread which
|
||||
will only wait the next scheduler timer intervention.
|
||||
(see <gint/thread/thread.c> for more information). */
|
||||
return (&thread_kernel_idle);
|
||||
}
|
||||
|
||||
/* return the next thread */
|
||||
thread_scheduler_current = thread;
|
||||
return (thread);
|
||||
}
|
||||
|
||||
#ifdef THREAD_SCHEDULER_DEBUG
|
||||
/* thread_schedule_debug(): Debug scheduler, involved each context_switch */
|
||||
void thread_schedule_debug(struct thread *thread)
|
||||
{
|
||||
extern uint32_t thread_tmu_interrupt_id;
|
||||
extern uint32_t thread_tmu_tcr_addr;
|
||||
extern uint32_t thread_tmu_tcr_mask;
|
||||
extern uint32_t thread_tmu_tstr_mask;
|
||||
extern uint32_t thread_tmu_tstr_addr;
|
||||
|
||||
dclear(C_WHITE);
|
||||
dprint(1, 1, C_BLACK, "next process -> %p", thread);
|
||||
dprint(1, 11, C_BLACK, "|--spc: %p", thread->context.cpu.spc);
|
||||
dprint(1, 21, C_BLACK, "|--ssr: %p", thread->context.cpu.ssr);
|
||||
dprint(1, 31, C_BLACK, "|--r15: %p", thread->context.cpu.reg[15]);
|
||||
dprint(1, 41, C_BLACK, "|--@r0: %p", &thread->context.cpu.reg[0]);
|
||||
dprint(1, 61, C_BLACK, "|--tmu_event: %#x", thread_tmu_interrupt_id);
|
||||
dprint(1, 71, C_BLACK, "|--tmu_tcr_addr: %#x", thread_tmu_tcr_addr);
|
||||
dprint(1, 81, C_BLACK, "|--tmu_tcr_mask: %#x", thread_tmu_tcr_mask);
|
||||
dprint(1, 91, C_BLACK, "|--tmu_tstr_addr: %#x", thread_tmu_tstr_addr);
|
||||
dprint(1, 101, C_BLACK, "`--tmu_tstr_mask: %#x", thread_tmu_tstr_mask);
|
||||
dupdate();
|
||||
while (1);
|
||||
}
|
||||
#endif
|
|
@ -0,0 +1,12 @@
|
|||
//---
|
||||
// gint:thread:signal - Thread Signals management
|
||||
//---
|
||||
#include <gint/thread.h>
|
||||
|
||||
/* thread_signals_pending_deliver(): Deliver all pending signals of THREAD

   Stub: signal delivery is not implemented yet. It always reports "no
   pending signal requiring a user handler" (returns 0), so the scheduler
   treats THREAD as runnable. */
int thread_signals_pending_deliver(struct thread *thread)
{
	//TODO
	(void)thread;
	return (0);
}
|
|
@ -0,0 +1,233 @@
|
|||
//---
|
||||
// gint:thread:thread - Thread management
|
||||
//---
|
||||
#include <gint/thread.h>
|
||||
#include <gint/timer.h>
|
||||
#include <gint/drivers.h>
|
||||
#include <gint/std/setjmp.h>
|
||||
#include <gint/std/stdlib.h>
|
||||
#include <gint/std/string.h>
|
||||
|
||||
#include <gint/display.h>
|
||||
|
||||
/* Scheduler hardware pre-calculated information — filled by the thread
   driver, consumed by the assembly scheduler (<gint/thread/kernel.S>) */
uint32_t thread_tmu_interrupt_id;	/* INTEVT code of the scheduler's timer */
uint32_t thread_tmu_tcr_addr;		/* address of the timer's TCR register */
uint32_t thread_tmu_tcr_mask;		/* TCR mask (interrupt flag bits) */
uint32_t thread_tmu_tstr_addr;		/* address of the timer's TSTR register */
uint32_t thread_tmu_tstr_mask;		/* TSTR mask (timer start bit) */

/* scheduler software information */
struct thread *thread_scheduler_queue;		/* linked list of all threads */
struct thread *thread_scheduler_current;	/* currently running thread */
int thread_scheduler_gint_timer_id;		/* gint timer locked by the scheduler */

/* Kernel idle thread information (runs when nothing else is runnable) */
uint32_t thread_kernel_idle_stack[THREAD_KERNEL_IDLE_STACK_SIZE];
struct thread thread_kernel_idle;

/* other information */
thread_t thread_id_counter;	/* next thread ID to be assigned */
jmp_buf kerneljmp;		/* kernel context saved by the first
				   thread_create() — presumably the longjmp
				   target when the last thread dies; TODO
				   confirm against the exit path */
|
||||
|
||||
|
||||
|
||||
//---
|
||||
// Internal thread actions
|
||||
//---
|
||||
/* thread_find_by_id(): Try to find specific thread using his ID
|
||||
This function SHOULD be called in "thread atomic" context ! */
|
||||
static int thread_find_by_id(thread_t tid, struct thread ***target)
|
||||
{
|
||||
struct thread **thread;
|
||||
|
||||
thread = &thread_scheduler_queue;
|
||||
while (*thread != NULL) {
|
||||
if ((*thread)->private.id == tid) {
|
||||
*target = thread;
|
||||
return (0);
|
||||
}
|
||||
thread = &(*thread)->private.next;
|
||||
}
|
||||
return (-1);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
//---
|
||||
// User thread interface
|
||||
//---
|
||||
|
||||
/* thread_create(): Create a new thread */
|
||||
int thread_create(thread_t *tid, int nb_arg, void *function, ...)
|
||||
{
|
||||
struct thread *thread;
|
||||
uintptr_t *tmp;
|
||||
va_list ap;
|
||||
|
||||
/* Check first created thread */
|
||||
uint32_t test = thread_atomic_start();
|
||||
if (thread_scheduler_queue == NULL) {
|
||||
if (setjmp(kerneljmp) != 0) {
|
||||
thread_atomic_stop();
|
||||
return (0xdeadbeef);
|
||||
}
|
||||
timer_start(thread_scheduler_gint_timer_id);
|
||||
}
|
||||
|
||||
/* create the new thread */
|
||||
thread = (struct thread *)calloc(1, sizeof(struct thread));
|
||||
if (thread == NULL) {
|
||||
thread_atomic_stop();
|
||||
return (-1);
|
||||
}
|
||||
thread->private.stack = (uintptr_t)malloc(THREAD_STACK_SIZE);
|
||||
if (thread->private.stack == 0x00000000) {
|
||||
free(thread);
|
||||
thread_atomic_stop();
|
||||
return (-1);
|
||||
}
|
||||
thread->context.cpu.reg[15] = thread->private.stack + THREAD_STACK_SIZE;
|
||||
thread->status = THREAD_STATUS_RUNNING;
|
||||
thread->context.cpu.pr = (uintptr_t)thread_kernel_terminate_trampoline;
|
||||
thread->context.cpu.spc = (uintptr_t)function;
|
||||
thread->context.cpu.ssr = 0x40000000;
|
||||
va_start(ap, function);
|
||||
for (int i = 0; i < nb_arg; ++i) {
|
||||
if (i <= 7) {
|
||||
thread->context.cpu.reg[4 + i] = va_arg(ap, uintptr_t);
|
||||
continue;
|
||||
}
|
||||
tmp = (uintptr_t*)((uintptr_t)thread->context.cpu.reg[15]);
|
||||
tmp[0 - i] = va_arg(ap, uintptr_t);
|
||||
}
|
||||
va_end(ap);
|
||||
|
||||
/* link the thread to the queue */
|
||||
thread->private.next = thread_scheduler_queue;
|
||||
thread_scheduler_queue = thread;
|
||||
|
||||
/* generate the thread ID */
|
||||
*tid = thread_id_counter;
|
||||
thread->private.id = thread_id_counter;
|
||||
thread_id_counter = thread_id_counter + 1;
|
||||
thread_atomic_stop();
|
||||
|
||||
/* DEBUG */
|
||||
extern uint32_t thread_atomic_sr_save;
|
||||
extern uint32_t thread_atomic_counter;
|
||||
dclear(C_WHITE);
|
||||
dtext(1, 1, C_BLACK, "Thread created !");
|
||||
dprint(1, 32, C_BLACK, "start: %#x", test);
|
||||
dprint(1, 40, C_BLACK, "thread_atomic_sr_save: %#x", thread_atomic_sr_save);
|
||||
dprint(1, 48, C_BLACK, "thread_atomic_counter: %#x", thread_atomic_counter);
|
||||
dupdate();
|
||||
while (1);
|
||||
return (0);
|
||||
}
|
||||
|
||||
/* thread_join(): Wait a thread terminason */
|
||||
int thread_join(thread_t tid, void **retval)
|
||||
{
|
||||
struct thread **thread;
|
||||
void *tmp;
|
||||
|
||||
/* wait until the target thread is not in the zombies state */
|
||||
while (1) {
|
||||
/* Try to find the thread and check her status */
|
||||
thread_atomic_start();
|
||||
if (thread_find_by_id(tid, &thread) != 0) {
|
||||
thread_atomic_stop();
|
||||
return (-1);
|
||||
}
|
||||
if ((*thread)->status == THREAD_STATUS_ZOMBIE)
|
||||
break;
|
||||
|
||||
/* clean wait (TODO: sched_yield()) */
|
||||
thread_atomic_stop();
|
||||
__asm__ volatile ("sleep");
|
||||
}
|
||||
|
||||
/* destroy the thread */
|
||||
tmp = *thread;
|
||||
*retval = (void*)((uintptr_t)(*thread)->private.ret);
|
||||
*thread = (*thread)->private.next;
|
||||
free(tmp);
|
||||
return (0);
|
||||
}
|
||||
|
||||
/* thread_exit(): Terminate the calling thread */
|
||||
//TODO: cleanup_push() all unpopped clean-up handlers !
|
||||
//TODO: block any signals !
|
||||
void thread_exit(void *retval)
|
||||
{
|
||||
/* free'd thread internal memory */
|
||||
thread_atomic_start();
|
||||
thread_scheduler_current->status = THREAD_STATUS_ZOMBIE;
|
||||
thread_scheduler_current->private.ret = retval;
|
||||
free((void *)thread_scheduler_current->private.stack);
|
||||
thread_atomic_stop();
|
||||
|
||||
/* Check if the thread is the last */
|
||||
if (thread_scheduler_queue == thread_scheduler_current
|
||||
&& thread_scheduler_current->private.next == NULL) {
|
||||
timer_pause(thread_scheduler_gint_timer_id);
|
||||
longjmp(kerneljmp, 1);
|
||||
}
|
||||
|
||||
/* wait until a scheduler interrupt */
|
||||
thread_atomic_stop();
|
||||
while (1) {
|
||||
__asm__ volatile ("sleep");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
//---
|
||||
// Driver part
|
||||
//---
|
||||
|
||||
/* init(): setup the scheduler */
|
||||
static void init(void)
|
||||
{
|
||||
struct timer_hw_info tinfo;
|
||||
int delay;
|
||||
|
||||
/* initialize scheduler */
|
||||
thread_scheduler_queue = NULL;
|
||||
thread_scheduler_current = NULL;
|
||||
thread_id_counter = 0;
|
||||
|
||||
/* intialize idle thread */
|
||||
memset(&thread_kernel_idle, 0x00, sizeof(struct thread));
|
||||
thread_kernel_idle.context.cpu.reg[15] =
|
||||
(uintptr_t)&thread_kernel_idle_stack;
|
||||
thread_kernel_idle.context.cpu.reg[15] +=
|
||||
THREAD_KERNEL_IDLE_STACK_SIZE;
|
||||
thread_kernel_idle.context.cpu.spc =
|
||||
(uintptr_t)&thread_kernel_idle_code;
|
||||
thread_kernel_idle.context.cpu.pr =
|
||||
(uintptr_t)&thread_kernel_terminate_trampoline;
|
||||
|
||||
/* setup scheduler's timer */
|
||||
delay = 1000000 / THREAD_SCHEDULER_FREQUENCY;
|
||||
if(!delay) delay = 1;
|
||||
thread_scheduler_gint_timer_id = timer_setup(TIMER_ANY, delay, NULL);
|
||||
if (thread_scheduler_gint_timer_id < 0) return;
|
||||
timer_get_hw_info(thread_scheduler_gint_timer_id, &tinfo);
|
||||
thread_tmu_tcr_addr = (uintptr_t)tinfo.hardware.address.tcr;
|
||||
thread_tmu_tcr_mask = (uintptr_t)tinfo.interrupt.mask.tcr.unf;
|
||||
thread_tmu_tstr_addr = (uintptr_t)tinfo.hardware.address.tstr;
|
||||
thread_tmu_tstr_mask = (uintptr_t)tinfo.interrupt.mask.tstr.str;
|
||||
thread_tmu_interrupt_id = (uintptr_t)tinfo.interrupt.id;
|
||||
}
|
||||
|
||||
/* The thread scheduler is treated as a regular gint driver: init() is
   invoked by the driver framework at startup (priority level 4). */
gint_driver_t drv_thread = {
	.name = "THREAD",
	.init = init
};
GINT_DECLARE_DRIVER(4, drv_thread);
|
|
@ -304,6 +304,53 @@ int timer_timeout(void volatile *arg)
|
|||
return TIMER_STOP;
|
||||
}
|
||||
|
||||
//---
|
||||
// Debugging function
|
||||
//---
|
||||
/* timer_get_hw_info(): Get hardware information */
|
||||
int timer_get_hw_info(int id, struct timer_hw_info *info)
|
||||
{
|
||||
uint16_t etmu_event[6] = { 0x9e0, 0xc20, 0xc40, 0x900, 0xd00, 0xfa0 };
|
||||
|
||||
/* check error */
|
||||
if (info == NULL && id >= timer_count())
|
||||
return (-1);
|
||||
|
||||
/* The timer should be installed... */
|
||||
if(!timers[id])
|
||||
return (-2);
|
||||
|
||||
/* check timer hardware type (ETMU or TMU) */
|
||||
if(id < 3) {
|
||||
tmu_t *T = &TMU[id];
|
||||
info->hardware.address.tstr = (void *)TSTR;
|
||||
info->hardware.address.tcor = (void *)&T->TCOR;
|
||||
info->hardware.address.tcnt = (void *)&T->TCNT;
|
||||
info->hardware.address.tcr = (void *)&T->TCR;
|
||||
info->hardware.context.tcor = T->TCOR;
|
||||
info->hardware.context.tcnt = T->TCNT;
|
||||
info->hardware.context.tcr = T->TCR.word;
|
||||
info->interrupt.mask.tcr.unf = 0x0100;
|
||||
info->interrupt.mask.tcr.unie = 0x0020;
|
||||
info->interrupt.mask.tstr.str = 1 << id;
|
||||
info->interrupt.id = 0x400 + (0x20 * id);
|
||||
} else {
|
||||
etmu_t *T = &ETMU[id - 3];
|
||||
info->hardware.address.tstr = (void *)&T->TSTR;
|
||||
info->hardware.address.tcor = (void *)&T->TCOR;
|
||||
info->hardware.address.tcnt = (void *)&T->TCNT;
|
||||
info->hardware.address.tcr = (void *)&T->TCR;
|
||||
info->hardware.context.tcor = T->TCOR;
|
||||
info->hardware.context.tcnt = T->TCNT;
|
||||
info->hardware.context.tcr = T->TCR.byte;
|
||||
info->interrupt.mask.tcr.unf = 0x0002;
|
||||
info->interrupt.mask.tcr.unie = 0x0001;
|
||||
info->interrupt.mask.tstr.str = 0x01;
|
||||
info->interrupt.id = etmu_event[id - 3];
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
||||
//---
|
||||
// Driver initialization
|
||||
//---
|
||||
|
|
Loading…
Reference in New Issue