Compare commits

...

4 Commits

Author SHA1 Message Date
Yatis 413c2b57f2 Thread:
* allow exception when atomic operation is used !
* Add documentation
2021-01-03 20:58:49 +01:00
Yatis 6399ed6f40 Thread:
* add (real) scheduler interface
* add signals interface
* add idle thread interface (prototype)
* add thread attribute interface (thread behaviour configuration)
* add (real) kernel yield function
* add fx9860 support.
* add kernel stack, used in the kernel part.
* update kernel part
* fix scheduler timer stop/reload error.
* fix mutex deadlock
* fix setjmp / longjmp functions

Know issue:
    If a thread uses the "longjmp()" feature, a crash occurs — not during the
    non-local goto itself, but when the first "return" after it is executed.
    I don't know exactly why this happens, but I think it is because I use
    "thread_atomic_start()" many times, and if an exception occurs during
    that part, a crash will occur.

    Currently, the refactoring is (probably) coming to the end and the thread
    management is pretty stable on a simple project, this is why I don't fix
    this issue now.
2021-01-03 11:46:21 +01:00
Yatis 2c5cef3015 Thread:
* add mutex interface (complete but see "issues")
* fix (and update) scheduler (high-level, thread_schedule() function)
* fix timer debugging functions (compilation part)

Issue:
    The "thread_mutex_lock()" function will crash the device if it is used in
    multiple threads at the same time. I cannot find why this happens because
    when I test all the mutex API in one thread context, nothing goes wrong,
    so it's possibly the scheduler's fault(?)

    But, for now, I will update the "user" interface and the scheduler part for
    the next update, I will fix this issue as soon as the complete (first
    version) of thread implementation API is written.
2020-12-29 14:48:34 +01:00
Yatis f02ca55429 Add thread support (SH4)
* thread context switching (support ETMU and TMU)
* thread interface (not optimal)
* thread scheduler (basic, non-preemptive)
* thread signals interface (not implemented yet but linked to the scheduler)
* thread driver (not complete yet)
2020-12-26 23:59:02 +01:00
18 changed files with 2582 additions and 13 deletions

14
TODO
View File

@ -12,8 +12,22 @@ Extensions on existing code:
* core: run destructors when a task-switch results in leaving the app
* core rtc: use qdiv10 to massively improve division performance
* topti: let the font specify letter and word spacing
* thread:
- provide better interface for thread creation:
- created thread is unable the be in zombie state
- created thread is linked to another thread (like a parent)
- provide interface for the idle thread
- allow menu-return
- allow custom idle handler
- provide better interface for the "main" thread:
- allow custom jmp_buff, which will overwrite the died thread's
context with the saved jmpbuf context and perform a
longjmp() with it.
- pseudo-preemption using pthread_yield() design(?)
- move the keyboard driver into a thread(?)
Future directions.
* A re-implementation of virtual timer
* A complete file system abstraction
* Integrate overclock management
* Audio playback using TSWilliamson's libsnd method

View File

@ -185,6 +185,13 @@ SECTIONS
*(.ilram)
/* Code that must remain mapped is placed here */
*(.gint.mapped)
/* thread kernel stack */
_thread_kernel_stack_end = . ;
. = . + 2048 ;
_thread_kernel_stack_start = . ;
. = ALIGN(16);
} > ilram AT> rom
@ -222,13 +229,13 @@ SECTIONS
/* Code that must remain permanently mapped (.gint.mapped); relocated
to start of user RAM at startup, accessed through P1 */
.gint.mapped ALIGN(4) : ALIGN(4) {
/*.gint.mapped ALIGN(4) : ALIGN(4) {
_lgmapped = LOADADDR(.gint.mapped);
*(.gint.mapped)
. = ALIGN(16);
} > rram AT> rom
_sgmapped = SIZEOF(.gint.mapped);
_sgmapped = SIZEOF(.gint.mapped);*/

View File

@ -147,6 +147,10 @@ SECTIONS
/* Code that must remain mapped is placed here */
*(.gint.mapped)
/* thread kernel stack */
_thread_kernel_stack_end = . ;
. = . + 2048 ;
_thread_kernel_stack_start = . ;
. = ALIGN(16);
} > ilram AT> rom

51
include/gint/std/setjmp.h Normal file
View File

@ -0,0 +1,51 @@
#ifndef GINT_STD_SETJMP
# define GINT_STD_SETJMP
#include <stddef.h>
#include <stdint.h>
/* Custom(?) jmp_buf struct
@note:
We save the callee-saved registers r8 ~ r15 plus SR, GBR, MACL, MACH and PR
(the return address). The SR register is saved first because longjmp() can
be invoked while a different register bank is selected. So, to avoid this,
it is simpler to restore the saved SR first and then restore all the other
registers (see <src/setjmp/longjmp.S>). */
struct __jmp_buf
{
	uint32_t sr;		/* status register; restored first (see note above) */
	uint32_t reg[8];	/* callee-saved registers r8..r15 */
	uint32_t gbr;		/* global base register */
	uint32_t macl;		/* multiply-accumulate low */
	uint32_t mach;		/* multiply-accumulate high */
	uint32_t pr;		/* procedure register (return address) */
};
/* User-facing jmp_buf alias: array-of-one so the object is passed by
   reference, matching the standard <setjmp.h> convention */
typedef struct __jmp_buf jmp_buf[1];
/* setjmp(): store the calling environment in ENV
This function saves various information about the calling environment
(typically, the stack pointer, the instruction pointer, the values of some
registers and the signal mask) in the buffer ENV for later use by longjmp().
In this case, setjmp() returns 0 */
extern int setjmp(jmp_buf env);
/* longjmp(): effectuate non-local goto using the saved ENV
This function uses the information saved in env to transfer control back to
the point where setjmp() was called and to restore ("rewind") the stack to its
state at the time of the setjmp() call.
Following a successful longjmp(), execution continues as if setjmp() had
returned for a second time. This "fake" return can be distinguished from a
true setjmp() call because the "fake return" returns the value provided in
val. If the programmer mistakenly passes the value 0 in val, the "fake" return
will instead return 1. */
extern void longjmp(jmp_buf env, int val);
#endif /* GINT_STD_SETJMP */

672
include/gint/thread.h Normal file
View File

@ -0,0 +1,672 @@
#ifndef GINT_THREAD
# define GINT_THREAD
#include <stddef.h>
#include <stdint.h>
#include <stdarg.h>
/* define the jmp_buf type */
#include <gint/std/setjmp.h>
/* Define the default context switching frequency */
#ifndef THREAD_SCHEDULER_FREQUENCY
# define THREAD_SCHEDULER_FREQUENCY 16
#endif
/* Define the default thread stack size */
#ifndef THREAD_STACK_SIZE
# define THREAD_STACK_SIZE (4 * 1024)
#endif
/* Define the default kernel idle thread's stack size */
#ifndef THREAD_IDLE_STACK_SIZE
# define THREAD_IDLE_STACK_SIZE 32
#endif
//---
// Thread attribute interface
//---
/* define the thread attribute watermark, used to check if the attribute object is valid */
#define THREAD_ATTR_WATERMARK (~0xdeadbeef)
/* define the fundamental data type and alias for thread attribute */
/* Thread attribute object.
   Initialized by thread_attr_init(); the watermark marks the object as
   valid so the other thread_attr_*() functions can reject NULL or
   uninitialized objects (they return a negative value in that case). */
struct thread_attr_s {
	/* user-visible behaviour switches */
	struct {
		/* joinable / detached state applied at thread creation */
		enum {
			THREAD_ATTR_JOINABLE = 0,
			THREAD_ATTR_DETACHED = 1,
		} detach;
		/* Gint-specific feature: longjmp() back to thread_create()
		   when the thread dies (see thread_attr_enablelongjmp()) */
		enum {
			THREAD_ATTR_LONGJMP_DISABLE = 0,
			THREAD_ATTR_LONGJMP_ENABLE = 1,
		} longjmp;
	} state;
	/* internal bookkeeping — not to be touched by users.
	   NOTE(review): "private" is a C++ keyword, so this header cannot be
	   included from C++ as-is. */
	struct {
		uint32_t watermark;	/* THREAD_ATTR_WATERMARK when valid */
	} private;
};
typedef struct thread_attr_s thread_attr_t;
/* thread_attr_init(): Initialize thread attribute object
This function initializes the thread attributes object pointed to by ATTR
with default attribute values. After this call, individual attributes of the
object can be set using various related functions formatted like
"thread_attr_*()" and then the object can be used in one or more
"thread_create()" calls that create threads.
@return
* negative value if ATTR is NULL
* 0 if success */
extern int thread_attr_init(thread_attr_t *attr);
/* thread_attr_setdetachstate() and thread_attr_getdetachstate()
The "thread_attr_setdetachstate()" function sets the detach state attribute
of the thread attributes object referred to by attr to the value specified
in detachstate.
The detach state attribute determines whether a thread created using the
thread attributes object attr will be created in a joinable or a detached
state.
The following values may be specified in detachstate:
- THREAD_ATTR_DETACHED
Threads that are created using ATTR will be created in a detached state.
- THREAD_ATTR_JOINABLE
Threads that are created using attr will be created in a joinable state.
The default setting of the detach state attribute in a newly initialized
thread attributes object is THREAD_ATTR_JOINABLE.
The "thread_attr_getdetachstate()" returns the detach state attribute of
the thread attributes object attr in the buffer pointed to by detachstate.
@return
* negative value if ATTR is NULL or uninitialized
* 0 if success */
extern int thread_attr_setdetachstate(thread_attr_t *attr, int detachstate);
extern int thread_attr_getdetachstate(thread_attr_t *attr, int *detachstate);
/* thread_attr_enablelongjmp() thread_attr_getlongjmpstatus()
This feature is CUSTOM to the Gint kernel. When a thread with this attribute
dies, instead of releasing its memory and removing it from the scheduler,
it is allowed to perform a "longjmp()" and return ("rewind") to the
"thread_create()" call that created the current thread. The original
"thread_create()" will then return the special value:
THREAD_CREATE_LONGJMP_RETURN.
In the case of Gint, this feature is very useful because the "main" function
can be threaded and we can return to the "dark side" of the kernel to manage
destructors or to call the main function again. We can allow this while
(potentially) threaded drivers keep running, isolated from the Gint
"bootstrap" part.
The following values may be specified in "status":
- THREAD_ATTR_LONGJMP_ENABLE
Enable the longjmp() when the thread come to the end.
- THREAD_ATTR_LONGJMP_DISABLE
Disable the longjmp() when the thread come to the end.
@return
* negative value if ATTR is NULL or uninitialized
* 0 if success */
extern int thread_attr_enablelongjmp(thread_attr_t *attr, int status);
extern int thread_attr_getlongjmpstatus(thread_attr_t *attr, int *status);
/* thread_attr_destroy(): Destroy thread attribute object
When a thread attributes object is no longer required, it should be destroyed
using the "pthread_attr_destroy()" function. Destroying a thread attributes
object has no effect on threads that were created using that object.
Once a thread attributes object has been destroyed, it can be reinitialized
using "thread_attr_init()". Any other use of a destroyed thread attributes
object has undefined results.
@return
* negative value if ATTR is NULL or uninitialized
* 0 if success */
extern int thread_attr_destroy(thread_attr_t *attr);
//---
// Signals interface
//
// A “signal” is a software interrupt delivered to a process. The operating
// system uses signals to report exceptional situations to an executing
// program. Some signals report errors such as references to invalid
// memory addresses; others report asynchronous events, such as
// disconnection of a phone line.
//
// If you anticipate an event that causes signals, you can define a handler
// function and tell the operating system to run it when that particular
// type of signal arrives.
//
// Finally, one thread can send a signal to another process; this allows a
// parent thread to abort a child, or two related thread to communicate
// and synchronize.
//
//---
/* Thread signal set type: one bit per signal (32 bits >= NSIG) */
typedef uint32_t sigset_t;
/* Signal handler type: receives the signal number as its argument */
typedef void (*sighandler_t)(int);
/* Fake signal handler values (same convention as POSIX <signal.h>) */
#define SIG_ERR ((sighandler_t) -1) /* Error return. */
#define SIG_DFL ((sighandler_t) 0) /* Default action. */
#define SIG_IGN ((sighandler_t) 1) /* Ignore signal. */
/* Number of signals available (valid signum values are 0..NSIG-1) */
#define NSIG 19
/* All signum values
(note: all signals are not currently supported, but will be in the near
future) */
#define SIGKILL 0 /* (unblockable) Killed */
#define SIGSTOP 1 /* (unblockable) Stop */
#define SIGTERM 2 /* (unblockable) Termination request. */
#define SIGCONT 3 /* Continue. */
#define SIGTRAP 4 /* Trace/breakpoint trap. */
#define SIGILL 5 /* Illegal instruction. */
#define SIGTSTP 6 /* Keyboard stop. */
#define SIGABRT 7 /* Abnormal termination. */
#define SIGCHLD 8 /* Child terminated or stopped. */
#define SIGPOLL 9 /* Pollable event occurred (System V). */
#define SIGVTALRM 10 /* Virtual timer expired. */
#define SIGPROF 11 /* Profiling timer expired. */
#define SIGUSR1 12 /* User-defined signal 1. */
#define SIGUSR2 13 /* User-defined signal 2. */
#define SIGHUP 14 /* Hang up. */
#define SIGINT 15 /* Interruption. */
#define SIGBUS 16 /* Bus error. */
#define SIGFPE 17 /* Fatal arithmetic error. */
#define SIGSEGV 18 /* Segmentation violation. */
//---
// User thread interface
//---
/* define special return value with the thread_create() function */
#define THREAD_CREATE_LONGJMP_RETURN (0xd1ceca5e)
/* Define thread ID alias */
typedef uint32_t thread_t;
/* cpu_ctx: whole SH3-based CPU hardware context definition
   NOTE(review): presumably saved/restored by the context-switch assembly —
   keep the field order stable (confirm against the scheduler asm). */
struct cpu_ctx {
	uint32_t reg[16];	/* general-purpose registers r0..r15 */
	uint32_t gbr;		/* global base register */
	uint32_t macl;		/* multiply-accumulate low */
	uint32_t mach;		/* multiply-accumulate high */
	uint32_t ssr;		/* saved status register */
	uint32_t spc;		/* saved program counter */
	uint32_t pr;		/* procedure register (return address) */
};
/* struct thread: Thread structure definition
   One instance describes one kernel-managed thread: its saved hardware
   context, its attributes, its signal state and its scheduler bookkeeping. */
struct thread {
	/* hardware context, saved/restored on each context switch */
	struct cpu_ctx context;
	/* thread configuration (see the thread_attr_* interface) */
	struct thread_attr_s attr;
	/* per-thread signal information */
	struct {
		sighandler_t handler[NSIG];	/* one handler per signal */
		sigset_t pending;	/* raised but not yet delivered */
		sigset_t blocking;	/* currently blocked signals */
	} signals;
	/* thread scheduler information */
	struct {
		/* thread status */
		enum {
			THREAD_STATUS_PAUSED = 0,
			THREAD_STATUS_RUNNING = 1,
			THREAD_STATUS_ZOMBIE = 2,
			THREAD_STATUS_DEAD = 3
		} status;
		/* thread identifier */
		thread_t id;
		/* next thread in the scheduler queue */
		struct thread *next;
	} scheduler;
	/* private information — kernel use only */
	struct {
		uintptr_t stack; /* original stack address */
		uintptr_t ret; /* saved exit value */
		jmp_buf jmpbuf; /* longjmp feature (see thread_attr_enablelongjmp) */
		struct thread *sibling; /* sibling thread list */
		struct thread *child; /* child thread list */
		struct thread *parent; /* thread parent address */
	} private;
};
/* thread_create(): Create a new thread
This function creates a new thread which starts execution by invoking
"start_routine()"; you can pass as many arguments as you want and they are
all passed as arguments to start_routine().
The new thread terminates in one of the following ways:
- It calls "thread_exit()", specifying an exit status value that is
available to another thread in the same process that calls
"thread_join()"
- It returns from start_routine(). This is equivalent to calling
"thread_exit()" with the value supplied in the return statement.
- It is canceled (see "thread_cancel()").
- If one thread with the special THREAD_ATTR_MAIN_THREAD attribute dies. In
this case, all threads created with this special (custom) attribute will be
killed.
The "attr" argument points to a pthread_attr_t structure whose contents
are used at thread creation time to determine attributes for the new thread;
this structure is initialized using "thread_attr_init()" and related
functions. If "attr" is NULL, then the thread is created with default
attributes.
Before returning, a successful call to "thread_create()" stores the ID of
the new thread in the buffer pointed to by "thread"; this identifier is used
to refer to the thread in subsequent calls to other "thread_*" functions.
@return:
* negative value if error occurs
* 0 if success
*/
extern int thread_create(thread_t *thread,
thread_attr_t *attr, void *start_routine, ...);
/* Sentinel automatically appended to every thread_create() call so that the
   implementation can detect the end of the variadic argument list with
   va_arg() */
#define THREAD_CREATE_ARG_END_WATERMARK (0xdeb0cad0)
/* Function-like macro shadowing the declaration above: it appends the
   sentinel to the user's arguments. The call still resolves to the real
   function because the macro is declared after the prototype and only
   expands on call syntax. */
#define thread_create(...) thread_create(__VA_ARGS__,\
THREAD_CREATE_ARG_END_WATERMARK)
/* thread_join(): Wait for the thread specified by THREAD to terminate
   This function waits for the thread specified by THREAD to terminate.
   If that thread has already terminated, then "thread_join()" returns
   immediately. The thread specified by THREAD must be joinable.
   If RETVAL is not NULL, then "thread_join()" copies the exit status of the
   target thread (i.e., the value that the target thread supplied to
   "thread_exit()") into the location pointed to by RETVAL. If the target
   thread was canceled, then THREAD_CANCELED is placed in the location
   pointed to by RETVAL.
   If multiple threads simultaneously try to join with the same thread, the
   results are undefined. If the thread calling "thread_join()" is canceled,
   then the target thread will remain joinable (i.e., it will not be
   detached).
   The "thread_tryjoin()" function has the same behaviour as "thread_join()"
   except that it does not block the calling thread and returns immediately
   if the target thread is busy or unjoinable.
   @return:
   * negative value if error occurs
   * 0 if success */
extern int thread_join(thread_t thread, void **retval);
extern int thread_tryjoin(thread_t thread, void **retval);
/* thread_exit(): Terminate the calling thread
This function will terminates the calling thread and returns a value via
retval that (if the thread is joinable) is available to another thread in the
same process that calls "thread_join()".
Any clean-up handlers established by pthread_cleanup_push(3) that have not
yet been popped, are popped (in the reverse of the order in which they were
pushed) and executed. If the thread has any thread-specific data, then, after
the clean-up handlers have been executed, the corresponding destructor
functions are called, in an unspecified order.
When a thread terminates, process-shared resources (e.g., mutexes, condition
variables, semaphores, and file descriptors) are not released. */
extern void thread_exit(void *retval);
/* thread_kill(): Send signal to a thread
This function will raise a signal for any thread. The value of thread
can take special values:
* if thread is positive, then the signal is sent to the thread with the ID
specified by thread.
* if thread equals 0, then the signal is sent to the threads which have the
calling thread as parent.
@return:
* negative value if error occurs
* 0 if success */
extern int thread_kill(thread_t thread, int sig);
/* thread_signal(): sets the disposition of the signal signum to handler, which
is either SIG_IGN, SIG_DFL, or the address of a
programmer-defined function (a "signal handler").
This part provides a simple interface for establishing an action for a
particular signal.
If the signal signum is delivered to the process, then one of the following
happens:
- If the disposition is set to SIG_IGN, then the signal is ignored.
- If the disposition is set to SIG_DFL, then the default action associated
with the signal
- If the disposition is set to a function, then first either the disposition
is reset to SIG_DFL, or the signal is blocked, and then handler is called
with argument signum. If invocation of the handler caused the signal to
be blocked, then the signal is unblocked upon return from the handler.
The signals SIGKILL and SIGSTOP cannot be caught or ignored.
@return
the previous value of the signal handler, or SIG_ERR on error. */
extern void (*thread_signal(int signum, void (*handler)(int)))(int);
/* thread_terminate(): KERNEL-ONLY: destroy a thread
This function will destroy a thread regardless of its attributes, scheduler
status, ... This function is used by the kernel to remove definitively a
thread. */
extern int thread_terminate(struct thread *thread);
//---
// Thread mutex interface
//
// To have better control of resources and how threads access them, Gint
// implements a "mutex" object, which can help avoid race conditions and
// other concurrency issues. The term mutex refers to mutual exclusion.
//---
/* define the mutex watermark, used to check if the mutex is valid */
#define THREAD_MUTEX_WATERMARK 0xdeadbeef
/* Return values shared by all thread_mutex_*() functions */
enum
{
	thread_mutex_retval_success = 0,	/* operation succeeded */
	thread_mutex_retval_busy = 1,	/* mutex locked / unlock still pending */
	thread_mutex_retval_error = 2,	/* invalid argument or internal error */
	thread_mutex_retval_nomem = 3,	/* out of memory */
	thread_mutex_retval_timeout = 4	/* timedlock delay expired */
};
/* Mutex behaviour flags; combined and passed to thread_mutex_init() */
enum
{
	thread_mutex_type_plain = (1 << 1), /* non-recursive */
	thread_mutex_type_recursive = (1 << 2), /* support recursive */
	thread_mutex_type_timed = (2 << 3) /* support timeout */
	/* NOTE(review): (2 << 3) == (1 << 4), which breaks the (1 << n)
	   pattern of the values above — possibly meant (1 << 3); confirm
	   against the implementation before relying on the numeric value. */
};
/* Fundamental data type and alias for the thread mutex */
struct thread_mutex_s
{
	uint32_t watermark;	/* THREAD_MUTEX_WATERMARK when initialized */
	uint32_t lock;		/* lock state — recursion count? TODO confirm */
	uint8_t type;		/* combination of thread_mutex_type_* flags */
	thread_t owner;		/* ID of the thread currently holding the lock */
	/* timeout support (thread_mutex_type_timed) */
	struct {
		int id;		/* timer identifier */
		int abord;	/* NOTE(review): likely a misspelling of
				   "abort"; renaming would break users */
	} timer;
};
typedef struct thread_mutex_s thread_mutex_t;
/* thread_mutex_init(): Creates a new mutex object with type TYPE.
This function will initialize the mutex MUTEX using the type TYPE. The type
define the behaviour of the mutex, basically, you have:
- thread_mutex_type_plain
A mutex that does not support timeout, or test and return
- thread_mutex_type_recursive
A mutex that supports recursive locking, which means that the owning
thread can lock it more than once without causing deadlock
- thread_mutex_type_timed
A mutex that supports timeout
Not all combinations of mutex types are valid for the TYPE argument.
Valid uses of mutex types for the TYPE argument are:
- thread_mutex_type_plain
A non-recursive mutex that does not support timeout
- thread_mutex_type_timed
A non-recursive mutex that does support timeout
- thread_mutex_type_plain | thread_mutex_type_recursive
A recursive mutex that does not support timeout
- thread_mutex_type_recursive | thread_mutex_type_timed
A recursive mutex that does support timeout
@return
* thread_mutex_retval_success If successful initialized
* thread_mutex_retval_error If error occur */
extern int thread_mutex_init(thread_mutex_t *mutex, int type);
/* thread_mutex_lock(): Block the current thread until the mutex is unlocked.
This function will block the calling thread until the mutex is locked.
The behaviour is undefined if the current thread has already locked the
mutex and the mutex is not recursive.
Prior calls to "thread_mutex_unlock()" to the same mutex syncronize-with
this operations (if this operation success), and all lock/unlock operations
on any given mutex form a single total order (similar to the modification
order of an atomic).
@return:
* thread_mutex_retval_success If lock was obtained
* thread_mutex_retval_error If error occur */
extern int thread_mutex_lock(thread_mutex_t *mutex);
/* thread_mutex_timedlock(): Block the current thread until the mutex is
locked or until the time TIME has been reached.
This function will block the calling thread until the mutex is locked or
until the time TIME has been reached (the comment said milliseconds, but
the parameter is named delay_us — confirm the actual unit). If the current
thread has already locked the mutex and the mutex is not recursive, or if
the mutex does not support timeout, an error will be returned.
Prior calls to "thread_mutex_unlock()" to the same mutex syncronize-with
this operations (if this operation success), and all lock/unlock operations
on any given mutex form a single total order (similar to the modification
order of an atomic).
@return:
* thread_mutex_retval_success if the lock was obtained
* thread_mutex_retval_error if error occur */
extern int thread_mutex_timedlock(thread_mutex_t *mutex, uint64_t delay_us);
/* thread_mutex_trylock(): Try to lock the mutex without blocking.
This function tries to lock the mutex without blocking. It returns
immediately if the mutex is already locked.
Prior calls to "thread_mutex_unlock()" to the same mutex syncronize-with
this operations (if this operation success), and all lock/unlock operations
on any given mutex form a single total order (similar to the modification
order of an atomic).
@return:
* thread_mutex_retval_success if the lock was obtained
* thread_mutex_retval_busy if the mutex is already locked
* thread_mutex_retval_error if error occur */
extern int thread_mutex_trylock(thread_mutex_t *mutex);
/* thread_mutex_unlock(): Unlocks the mutex
This function synchronize-with subsequent "thread_mutex_lock()",
"thread_mutex_trylock()" and "thread_mutex_timedlock()" calls on the same
mutex. All lock/unlock operations on any given mutex form a single total
order (similar to the modification order of an atomic).
@return:
* thread_mutex_retval_success if unlocked
* thread_mutex_retval_busy if not locked but need other unlock call
* thread_mutex_retval_error if error occur */
extern int thread_mutex_unlock(thread_mutex_t *mutex);
/* thread_mutex_destroy(): Destroy the mutex.
This function will destroy the mutex. If there are any thread waiting on the
mutex, the behaviour is undefined. */
extern void thread_mutex_destroy(thread_mutex_t *mutex);
//---
// Thread atomic operation
//---
/* thread_atomic_start(): Start atomic operation
This function will block interruptions and exception until
"thread_atomic_stop()" is called. This is really useful when you need to
secure some tricky part of code (like driver kernel-level implementation).
But be careful: the code executed after this function SHOULD be
EXCEPTION-SAFE! Otherwise, a crash will occur and Gint can do nothing to
avoid it because it is hardware-specific. If you need to secure shared
data, use a mutex instead.
This implementation is recursion-safe and will return:
* the SR value when you enter the "atomic" operation (first call)
* 0 if you are already in an "atomic" operation (subsequent calls)
To return to "normal" operation, you should call "thread_atomic_stop()"
as many times as you have called "thread_atomic_start()". */
extern uint32_t thread_atomic_start(void);
/* thread_atomic_stop(): Stop atomic operation
This function will try to return to "normal" mode and will return:
* a negative value if an error occurs
* 0 if you are still in "atomic" mode
* the restored SR value if you have returned to "normal" mode */
extern uint32_t thread_atomic_stop(void);
//---
// Signals management (kernel-level)
//---
/* Return values of thread_signals_pending_deliver() — presumably: whether
   the target thread may keep running, was stopped, or was killed by the
   delivered signals (confirm against <gint/thread/scheduler.c>). */
enum {
	thread_signals_deliver_retval_running = 0,
	thread_signals_deliver_retval_stopped = 1,
	thread_signals_deliver_retval_dead = 2,
};
/* Macros for constructing status values (same encoding as the POSIX
   wait(2) status words: exit code in the high byte, signal in the low).
   NOTE(review): identifiers starting with "__" are reserved for the
   implementation in C. */
#define __W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
#define __W_STOPCODE(sig) ((sig) << 8 | 0x7f)
/* thread_signals_raise(): Raise a signal
This function will raise a signal for a given thread. This function is used
to raise kernel-based signals like SIGKILL. */
extern int thread_signals_raise(struct thread *thread, int sig);
/* thread_signals_deliver_pending(): Deliver all pending signals
This function is KERNEL-ONLY and SHOULD NEVER be called because it is
exclusively reserved for the internal thread scheduler. (see
<gint/thread/scheduler.c> for more information). */
extern int thread_signals_pending_deliver(struct thread *thread);
/* thread_signals_replace(): Replace current signal handler
This function will replace the signum signal handler with the new handler.
This part is used by the "thread_kill()" function. */
extern void (*thread_signals_replace(struct thread *thread,
int signum, void (*handler)(int)))(int);
/* thread_signals_sigreturn(): KERNEL-ONLY: Signals return handler
This function is invoked when a custom signal handler comes to its end. It
will restore the previous context and stack, then it will invalidate the
current thread to force the scheduler not to save the current thread
context; this mechanism will restore the previous context. */
extern void thread_signals_sigreturn(void);
//---
// Idle thread interface
//---
/* thread_idle_initialize(): Initialize the idle thread */
extern void thread_idle_init(void);
extern void thread_idle_uninit(void);
/* thread_idle_get(): Return the idle thread address */
extern struct thread *thread_idle_get(void);
//---
// Scheduler interface (kernel-only)
//---
/* thread_sched_initialize(): Initialize the scheduler */
extern void thread_sched_init(void);
extern void thread_sched_uninit(void);
/* thread_sched_start() / thread_sched_stop: Control the scheduler timer */
extern void thread_sched_start(void);
extern void thread_sched_stop(void);
/* thread_sched_add(): thread_sched_remove(): handle internal thread queue */
extern int thread_sched_add(struct thread *thread);
extern int thread_sched_remove(struct thread *thread);
/* thread_sched_find(): Find a thread structure using its ID */
extern struct thread *thread_sched_find(thread_t thread);
/* thread_sched_get_current(): Get the current thread context */
extern struct thread *thread_sched_get_current(void);
/* thread_sched_get_counter(): Return the number of thread in the queue */
extern int thread_sched_get_counter(void);
/* thread_sched_invalidate(): Invalidate the current thread.
This function will invalidate the current thread, it means that the current
thread will not be saved on the next schedule. This is really useful for the
signals' management to restore previously saved thread context.
You SHOULD never use it. */
extern void thread_sched_invalidate(void);
//---
// Kernel thread interface
//---
/* thread_kernel_terminate_trampoline(): Termination trampoline code.
This function is automatically invoked when a thread returns from its main
procedure; it will invoke the "thread_exit()" function with the returned
value.
You SHOULD never use it. */
extern void thread_kernel_terminate_trampoline(void);
/* thread_kernel_yield(): Cause the calling thread to relinquish the CPU */
extern void thread_kernel_yield(void);
/* thread_kernel_exit(): Terminate the calling thread */
extern void thread_kernel_exit(void *retval);
#endif /* GINT_THREAD */

View File

@ -236,4 +236,51 @@ void timer_reload(int timer, uint32_t delay);
bytes) and increments it. */
int timer_timeout(volatile void *arg);
//---
// Debugging interface
//
// @note
// This interface is used internally by the thread scheduler to precalculate
// timer information needed to switch context quickly. This is a draft
// interface that can be greatly improved.
//---
/* Hardware information retrievable through the timer debugging interface
   (filled in by timer_debug_get_hw_info()).
   NOTE(review): the field names match the SH TMU register set (TSTR, TCOR,
   TCNT, TCR) — confirm against the SH7305/SH7705 datasheet. */
struct timer_debug_info
{
	/* context related information */
	struct {
		/* memory-mapped register addresses for this timer */
		struct {
			void *tstr;	/* timer start register */
			void *tcor;	/* timer constant register */
			void *tcnt;	/* timer counter */
			void *tcr;	/* timer control register */
		} address;
		/* current register values */
		struct {
			uint32_t tcor;
			uint32_t tcnt;
			uint16_t tcr;
		} context;
	} hardware;
	/* interrupt related information */
	struct {
		struct {
			/* TCR bit masks */
			struct {
				uint16_t unf;	/* underflow flag */
				uint16_t unie;	/* underflow interrupt enable */
			} tcr;
			/* TSTR bit mask */
			struct {
				uint8_t str;	/* counter start bit */
			} tstr;
		} mask;
		int id;		/* interrupt identifier */
	} interrupt;
};
/* timer_debug_get_hw_info(): Get timer hardware information */
extern int timer_debug_get_hw_info(int id, struct timer_debug_info *info);
#endif /* GINT_TIMER */

View File

@ -62,18 +62,38 @@ _gint_inth_7305:
1: .long 0xff000028
#endif
/* SH7305-TYPE INTERRUPT HANDLER ENTRY - 40 BYTES */
/* SH7305-TYPE INTERRUPT HANDLER ENTRY - 54 BYTES (2 blocks in total) */
_gint_inth_7305:
/* First, we need to check the thread scheduler.
Due to the interrupt handler architecture, we cannot store this part
here. So, we need to jump into the scheduler's checking procedure,
but without modify the current user context.
@note
PR will be updated when "jsr" instruction is executed, R15 will be
lost if we push something into it ; So, let's send them using
non-conventional method. (see <gint/thread/scheduler.S> for more
information)
On SH4 processor, a special register named SGR save the value of R15
when interrupts / exceptions occur, but the SH3 doesn't have it, this
is why we save the stack value before using it. */
mov r15, r1
sts pr, r0
mov.l r0, @-r15
mov.l 1f, r2
jsr @r2
nop
/* Save caller-saved registers which might currently be in use by the
interrupted function, as we don't know what the callback will do */
sts.l pr, @-r15
stc.l gbr, @-r15
sts.l mach, @-r15
sts.l macl, @-r15
/* Get the event code from the INTEVT register */
mov.l 1f, r0
mov.l 2f, r0
mov.l @r0, r0
/* Interrupt codes start at 0x400 */
@ -96,8 +116,15 @@ _gint_inth_7305:
rte
nop
.zero 24
1: .long 0xff000028
/* force 4-alignement */
.zero 2
/* information */
1: .long _thread_kernel_sched_interrupt_procedure
2: .long 0xff000028
/* block padding */
.zero 8
.first_entry:
#ifdef FX9860G

View File

@ -9,6 +9,9 @@
#include <gint/gint.h>
#include <gint/hardware.h>
#include <gint/exc.h>
#include <gint/thread.h>
#include <gint/display.h>
#include "kernel.h"
@ -61,7 +64,7 @@ static void regcpy(uint32_t * restrict l, int32_t s, uint32_t * restrict r)
s -= 16;
}
}
#define regcpy(l, s, r) regcpy(l, (int32_t)s, r)
#define regcpy(l, s, r) regcpy(l, (intptr_t)s, r)
/* regclr(): Clear a memory region using symbol information
@r Source pointer (base address)
@ -77,7 +80,7 @@ static void regclr(uint32_t *r, int32_t s)
s -= 16;
}
}
#define regclr(r, s) regclr(r, (int32_t)s)
#define regclr(r, s) regclr(r, (intptr_t)s)
/* callarray(): Call an array of functions (constructors or destructors)
@f First element of array
@ -147,6 +150,7 @@ int start(int isappli, int optnum)
regcpy(&lyram, &syram, &ryram);
}
#if 0
#ifdef FX9860G
/* Copy permanently-mapped code to start of user RAM (on fx-CG 50 it
is loaded along ILRAM contents) */
@ -160,6 +164,7 @@ int start(int isappli, int optnum)
fixups[i] += (uint32_t)rgmapped;
}
#endif
#endif
/* Install gint, switch VBR and initialize drivers */
kinit();
@ -175,12 +180,32 @@ int start(int isappli, int optnum)
callarray(&bctors, &ectors);
int rc = 1;
while(1)
{
/* "Pre-main" loop.
We're trying to run the main function using a thread, if the thread
creation fail or if we return from the thread creation using the
longjmp() feature (see <gint/thread.h> for more information), we will
use the "classic" way to do the job.
@note
When we return from the "thread_create()" function with the longjmp()
feature, we are always in a thread execution (the scheduler always
performs context switch) */
int rc = -1;
thread_t thread;
thread_attr_t attr;
thread_attr_init(&attr);
thread_attr_setdetachstate(&attr, THREAD_ATTR_DETACHED);
thread_attr_enablelongjmp(&attr, THREAD_ATTR_LONGJMP_ENABLE);
int retval = thread_create(&thread, &attr, &main, isappli, optnum);
if (retval == 0)
thread_join(thread, (void**)&rc);
if (retval != 0 && (uint32_t)retval != THREAD_CREATE_LONGJMP_RETURN)
rc = main(isappli, optnum);
if(!gint_restart) break;
/* main loop */
while (gint_restart == 0) {
gint_osmenu();
rc = main(isappli, optnum);
}
callarray(&bdtors, &edtors);

91
src/std/setjmp.S Normal file
View File

@ -0,0 +1,91 @@
/*
** gint:std:setjmp - performing a nonlocal goto
*/
.section .gint.mapped, "ax"
.align 4
.global _setjmp
.type _setjmp, @function
/* setjmp(): saves various information about the calling environment
   (callee-saved registers, PR, MACH/MACL, GBR, SR) into the jmp_buf pointed
   to by r4, then returns 0 to mark the direct invocation. */
_setjmp:
	/* Mask interrupts / exceptions (set SR.BL) so the register dump below
	   cannot be preempted; r3 keeps the caller's original SR. */
	stc	sr, r1
	mov	r1, r3
	mov	#0x10, r2
	shll8	r2
	shll16	r2	! r2 = 0x10 << 24 = SR.BL bit
	or	r2, r1
	ldc	r1, sr
	/* The buffer holds 13 longwords (52 bytes): point r4 at the end and
	   fill downwards with pre-decrement stores. */
	add	#52, r4
	sts.l	pr, @-r4
	sts.l	mach, @-r4
	sts.l	macl, @-r4
	stc.l	gbr, @-r4
	mov.l	r15, @-r4
	mov.l	r14, @-r4
	mov.l	r13, @-r4
	mov.l	r12, @-r4
	mov.l	r11, @-r4
	mov.l	r10, @-r4
	mov.l	r9, @-r4
	mov.l	r8, @-r4
	mov.l	r3, @-r4	! previous SR register status (buffer slot 0)
	/* restore the caller's SR, then return 0 (direct call) */
	ldc	r3, sr
	rts
	mov	#0, r0
.global _longjmp
.type _longjmp, @function
/* longjmp(): restore the environment saved by setjmp()
   r4 = jmp_buf pointer, r5 = value to return from the matching setjmp()
   (forced to 1 if 0, as required by the C standard). */
_longjmp:
	/* Mask interrupts (set SR.BL) while the environment is swapped */
	stc	sr, r1
	mov	#0x10, r2
	shll8	r2
	shll16	r2
	or	r2, r1
	ldc	r1, sr
	/* Move the buffer pointer and return value into r8/r9: those
	   registers are NOT banked, so they survive the SR reload below
	   (which may switch the r0-r7 register bank). */
	tst	r5, r5
	mov	r4, r8
	bf/s	env_switch	! branch when r5 != 0
	mov	r5, r9		! delay slot: executes on both paths
	mov	#1, r9		! r5 was 0: return 1 instead
env_switch:
	/* Reload the saved SR first (slot 0 of the buffer) to force the
	   register-bank switch if one is needed, then stage the walking
	   pointer and return value. */
	ldc.l	@r8+, sr
	mov	r8, r4
	mov	r9, r0
	/* Restore the callee-saved context captured by setjmp(); this also
	   overwrites the r8/r9 scratch values used above. */
	mov.l	@r4+, r8
	mov.l	@r4+, r9
	mov.l	@r4+, r10
	mov.l	@r4+, r11
	mov.l	@r4+, r12
	mov.l	@r4+, r13
	mov.l	@r4+, r14
	mov.l	@r4+, r15
	ldc.l	@r4+, gbr
	lds.l	@r4+, macl
	lds.l	@r4+, mach
	lds.l	@r4+, pr
	rts
	nop

87
src/thread/atomic.S Normal file
View File

@ -0,0 +1,87 @@
/*
** gint:thread:atomic - Thread atomic helper
*/
.section .gint.mapped, "ax"
.align 4
.global _thread_atomic_start
.type _thread_atomic_start, @function
/* thread_atomic_start(): Mask interrupts and exceptions
   Re-entrant: a nesting counter tracks how many atomic sections are open.
   Returns -1 when already nested, otherwise the pre-mask SR value. */
_thread_atomic_start:
	/* Read and increment the nesting counter.
	   NOTE(review): this read-modify-write happens BEFORE SR is masked,
	   so an interrupt landing in this window can corrupt the count —
	   likely related to the crash mentioned in the commit log; confirm. */
	mov.l	thread_atomic_counter, r1
	mov.l	@r1, r2
	tst	r2, r2		! T = (counter was 0, i.e. outermost call)
	add	#1, r2
	mov.l	r2, @r1
	/* Nested call: nothing to mask, return -1 (delay slot sets r0) */
	bf/s	atomic_start_exit
	mov	#-1, r0
	/* Outermost call: raise the interrupt mask.
	   NOTE(review): sr_mask is 0x000000f0 (IMASK only); the original
	   comment mentions BL but the BL bit is not set here — confirm. */
	stc	sr, r1
	mov	r1, r0
	mov.l	sr_mask, r2
	or	r2, r1
	ldc	r1, sr
	/* Remember the pre-mask SR so thread_atomic_stop() can restore it;
	   r0 (the old SR) is also the return value. */
	mov.l	thread_atomic_sr_save, r1
	mov.l	r0, @r1
atomic_start_exit:
	rts
	nop
.global _thread_atomic_stop
.type _thread_atomic_stop, @function
/* thread_atomic_stop(): Unmask (if possible) interrupts / exceptions signals
   Decrements the nesting counter; only the outermost call restores the SR
   saved by thread_atomic_start(). Returns -1 if no atomic section is open,
   0 for a nested stop, and the restored SR value for the outermost stop. */
_thread_atomic_stop:
	/* Error: not currently inside an atomic section */
	mov.l	thread_atomic_counter, r1
	mov.l	@r1, r0
	tst	r0, r0
	bt	atomic_end_error
	/* T = (counter == 1), i.e. this stop closes the outermost section */
	cmp/eq	#1, r0
	add	#-1, r0
	mov.l	r0, @r1
	/* Nested stop: just return 0 (delay slot sets r0) */
	bf/s	atomic_end_exit
	mov	#0, r0
	/* Outermost stop: restore the SR saved by thread_atomic_start() */
	mov.l	thread_atomic_sr_save, r1
	mov.l	@r1, r0
	ldc	r0, sr
	bra	atomic_end_exit
	nop
atomic_end_error:
	mov	#-1, r0
atomic_end_exit:
	rts
	nop
	.align 4
/* literal pool: addresses of the module's global state */
thread_atomic_counter:	.long _thread_atomic_counter
thread_atomic_sr_save:	.long _thread_atomic_sr_save
sr_mask:		.long 0x000000f0
/*
** Global symbols
*/
	.global _thread_atomic_sr_save
	.type _thread_atomic_sr_save, @object
	.global _thread_atomic_counter
	.type _thread_atomic_counter, @object
	.align 4
/* SR image saved by the outermost thread_atomic_start() */
_thread_atomic_sr_save:	.long 0x00000000
/* nesting depth of open atomic sections */
_thread_atomic_counter:	.long 0x00000000

98
src/thread/attributes.c Normal file
View File

@ -0,0 +1,98 @@
//---
// gint:thread:attributes - Thread attribute helper
//---
#include <gint/thread.h>
#include <gint/std/string.h>
//---
// User interface
//---
/* thread_attr_init(): Initialize a thread attribute object with its default
   configuration: joinable thread, custom longjmp() feature disabled.
   Returns 0 on success, -1 if attr is NULL. */
int thread_attr_init(thread_attr_t *attr)
{
	if (attr == NULL)
		return (-1);
	thread_atomic_start();
	memset(attr, 0x00, sizeof(struct thread_attr_s));
	/* the watermark marks the object as properly initialized */
	attr->private.watermark = THREAD_ATTR_WATERMARK;
	attr->state.detach = THREAD_ATTR_JOINABLE;
	attr->state.longjmp = THREAD_ATTR_LONGJMP_DISABLE;
	thread_atomic_stop();
	return (0);
}
/* thread_attr_setdetachstate(): Set the detach state attribute of the thread
   attributes object referred to by attr (THREAD_ATTR_JOINABLE or
   THREAD_ATTR_DETACHED). Returns 0 on success, -1 on invalid attribute
   object or invalid detach state. */
int thread_attr_setdetachstate(thread_attr_t *attr, int detachstate)
{
	if (attr == NULL || attr->private.watermark != THREAD_ATTR_WATERMARK)
		return (-1);
	if (detachstate != THREAD_ATTR_JOINABLE
			&& detachstate != THREAD_ATTR_DETACHED)
		return (-1);
	thread_atomic_start();
	attr->state.detach = detachstate;
	thread_atomic_stop();
	return (0);
}
/* thread_attr_getdetachstate(): Return the detach state attribute of attr in
   the buffer pointed to by detachstate.
   Returns 0 on success, -1 on invalid attribute object or NULL output
   pointer. */
int thread_attr_getdetachstate(thread_attr_t *attr, int *detachstate)
{
	if (attr == NULL || attr->private.watermark != THREAD_ATTR_WATERMARK)
		return (-1);
	/* bug fix: reject a NULL output pointer instead of dereferencing it */
	if (detachstate == NULL)
		return (-1);
	thread_atomic_start();
	*detachstate = attr->state.detach;
	thread_atomic_stop();
	return (0);
}
/* thread_attr_enablelongjmp(): Enable / disable the custom longjmp feature
   (THREAD_ATTR_LONGJMP_ENABLE or THREAD_ATTR_LONGJMP_DISABLE).
   Returns 0 on success, -1 on invalid attribute object or status value. */
int thread_attr_enablelongjmp(thread_attr_t *attr, int status)
{
	if (attr == NULL || attr->private.watermark != THREAD_ATTR_WATERMARK)
		return (-1);
	if (status != THREAD_ATTR_LONGJMP_ENABLE
			&& status != THREAD_ATTR_LONGJMP_DISABLE)
		return (-1);
	thread_atomic_start();
	attr->state.longjmp = status;
	thread_atomic_stop();
	return (0);
}
/* thread_attr_getlongjmpstatus(): Return the longjmp-feature status of attr
   in the buffer pointed to by status.
   Returns 0 on success, -1 on invalid attribute object or NULL output
   pointer. */
int thread_attr_getlongjmpstatus(thread_attr_t *attr, int *status)
{
	if (attr == NULL || attr->private.watermark != THREAD_ATTR_WATERMARK)
		return (-1);
	/* bug fix: reject a NULL output pointer instead of dereferencing it */
	if (status == NULL)
		return (-1);
	thread_atomic_start();
	*status = attr->state.longjmp;
	thread_atomic_stop();
	return (0);
}
/* thread_attr_destroy(): Destroy a thread attribute object by wiping it
   (including the watermark, so further use is rejected).
   Returns 0 on success, -1 on invalid attribute object. */
int thread_attr_destroy(thread_attr_t *attr)
{
	if (attr == NULL)
		return (-1);
	if (attr->private.watermark != THREAD_ATTR_WATERMARK)
		return (-1);
	thread_atomic_start();
	memset(attr, 0x00, sizeof(struct thread_attr_s));
	thread_atomic_stop();
	return (0);
}

52
src/thread/idle.c Normal file
View File

@ -0,0 +1,52 @@
//---
// gint:thread:idle - Idle thread interface
//---
#include <gint/thread.h>
#include <gint/std/string.h>
/* define private symbols */
static struct thread thread_idle;
static uint32_t thread_idle_stack[THREAD_IDLE_STACK_SIZE];
//---
// Internal
//---
/* thread_idle_code(): Code executed by the idle thread
   Endlessly puts the CPU to sleep; each interrupt (notably the scheduler
   tick) wakes it briefly, after which it sleeps again. Never returns. */
static void thread_idle_code(void)
{
	while (1) {
		__asm__ volatile ("sleep");
	}
}
//---
// Module primitive
//---
/* thread_idle_init(): Initialize the idle thread
   Builds a minimal CPU context: the stack pointer (reg[15]) is set to the
   top of the dedicated stack (the stack is a uint32_t array, hence the
   "<< 2" words-to-bytes conversion) and the resume PC (spc) to the idle
   loop entry point.
   NOTE(review): pr is set to 0xa0000000 — presumably a trap address, since
   thread_idle_code() never returns; confirm the intent. */
void thread_idle_init(void)
{
	memset(&thread_idle, 0x00, sizeof(struct thread));
	thread_idle.context.reg[15] = (uintptr_t)thread_idle_stack;
	thread_idle.context.reg[15] += (THREAD_IDLE_STACK_SIZE << 2);
	thread_idle.context.spc = (uintptr_t)&thread_idle_code;
	thread_idle.context.pr = (uintptr_t)0xa0000000;
}
/* thread_idle_uninit(): Uninitialize the idle thread
   The idle thread holds no dynamic resources, so there is nothing to do. */
void thread_idle_uninit(void)
{
}
//---
// User interface
//---
/* thread_idle_get(): Return the idle thread
   Gives the scheduler access to the statically-allocated idle thread, used
   as a fallback when no runnable thread exists. */
struct thread *thread_idle_get(void)
{
	return (&thread_idle);
}

330
src/thread/kernel.S Normal file
View File

@ -0,0 +1,330 @@
/*
** gint:thread:kernel - Thread Kernel interface
** This file contains all the "kernel" low-level parts of the thread module:
** - thread_kernel_sched_procedure()       Thread scheduler procedure
** - thread_kernel_terminate_trampoline()  Thread termination trampoline
** - thread_kernel_yield()                 Relinquish the CPU
** - thread_kernel_exit()                  Terminate the current thread
*/
.section .gint.mapped, "ax"
.align 4
.global _thread_kernel_sched_interrupt_procedure
.type _thread_kernel_sched_interrupt_procedure, @function
/* thread_kernel_sched_procedure(): Scheduler entry
This "function" is invoked whenever ANY interruption occurs; this is the only
way to hook the scheduler in without breaking Gint's whole VBR software
architecture. The scheduler uses a ~128Hz timer to schedule thread contexts.
During the "driver" initialization (yes, the thread module is considered a
driver in Gint's eyes) one timer has been locked, and all hardware
information about it has been pre-calculated at that point.
@note
Due to the current Gint VBR architecture, this function is invoked each time
an interruption occurs. Because it is invoked like a common subroutine, the
PR register has been saved into R0 and the R15 register into R1.
We are always in the "privileged" bank register configuration, so we can only
use r0~r7 until the current thread context has been saved.
On entry, R0 contains the PR snapshot taken when the interrupt occurred, and
likewise R1 contains the stack snapshot. (Yes, I know that SH4-based MPUs
provide the SGR register, which saves the stack snapshot when interrupts /
exceptions occur, but we need to stay SH3-compatible.) */
_thread_kernel_sched_interrupt_procedure:
	/* Only react to the scheduler's own timer interrupt: compare the
	   INTEVT event code against the pre-calculated interrupt ID of the
	   locked timer; for any other event, return to the generic handler. */
	mov.l	intevt_register, r2
	mov.l	thread_sched_tmu_interrupt_id, r3
	mov.l	@r2, r2
	mov.l	@r3, r3
	cmp/eq	r2, r3
	bt	_thread_kernel_sched_entry
	rts
	nop
_thread_kernel_sched_entry:
	/* Stop the scheduler's timer to "avoid" counting the context
	   switching time.
	   The scheduler can use an ETMU instead of a classical TMU. ETMUs
	   have slow write interactions but the same access size as the TMU,
	   which is why the slow (write-and-verify) sequence is used here
	   without checking the timer type. */
	mov.l	thread_sched_tmu_tstr_addr, r2
	mov.l	thread_sched_tmu_tstr_mask, r3
	mov.l	@r2, r2
	mov.l	@r3, r3
	mov.b	@r2, r6
	not	r3, r3
	and	r3, r6		! clear the TSTR start bit
timer_tstr_slow_stop:
	/* write, then read back until the byte sticks (slow ETMU write) */
	mov.b	r6, @r2
	mov.b	@r2, r7
	cmp/eq	r6, r7
	bf	timer_tstr_slow_stop
save_context:
	/* check if a current thread is running (NULL = nothing to save) */
	mov.l	thread_sched_current, r2
	mov.l	@r2, r2
	tst	r2, r2
	bt/s	schedule_thread
	/* save current thread context — the add below is the delay slot and
	   runs on both paths (harmless when branching: r2 is discarded).
	   r2 is moved past the context block, which is filled downwards. */
	add	#88, r2
	mov.l	r0, @-r2	/* R0 contains user PR register snapshot */
	stc.l	spc, @-r2
	stc.l	ssr, @-r2
	sts.l	mach, @-r2
	sts.l	macl, @-r2
	stc.l	gbr, @-r2
	mov.l	r1, @-r2	/* R1 contains user R15 register snapshot */
	mov.l	r14, @-r2
	mov.l	r13, @-r2
	mov.l	r12, @-r2
	mov.l	r11, @-r2
	mov.l	r10, @-r2
	mov.l	r9, @-r2
	mov.l	r8, @-r2
	/* user-bank r0-r7 are reachable through the R*_BANK control names */
	stc.l	R7_BANK, @-r2
	stc.l	R6_BANK, @-r2
	stc.l	R5_BANK, @-r2
	stc.l	R4_BANK, @-r2
	stc.l	R3_BANK, @-r2
	stc.l	R2_BANK, @-r2
	stc.l	R1_BANK, @-r2
	stc.l	R0_BANK, @-r2
schedule_thread:
	/* Call the high-level abstraction.
	   R1/R0 (user stack / PR snapshots) are saved on the kernel stack so
	   the context can be restored if the scheduler fails. The stack is
	   switched to the kernel stack to avoid undefined behaviour if the
	   current thread is destroyed during scheduling. */
	mov.l	thread_kernel_stack, r15
	mov.l	r1, @-r15
	mov.l	r0, @-r15
	mov.l	thread_schedule, r0
	jsr	@r0
	nop
#ifdef THREAD_SCHEDULER_DEBUG
	/* debug hook: thread_schedule_debug(next_thread) */
	mov.l	thread_schedule_debug, r1
	jsr	@r1
	mov	r0, r4
#endif
	/* r0 = next thread (NULL = keep current context: restart timer) */
	tst	r0, r0
	lds.l	@r15+, pr	! pop the saved PR snapshot
	bt/s	scheduler_restart_timer
	mov.l	@r15+, r15	! delay slot: pop the saved user stack
context_restore:
	/* publish the elected thread as the current one */
	mov.l	thread_sched_current, r1
	mov.l	r0, @r1
	/* restore the new context (mirror of the save sequence above) */
	ldc.l	@r0+, R0_BANK
	ldc.l	@r0+, R1_BANK
	ldc.l	@r0+, R2_BANK
	ldc.l	@r0+, R3_BANK
	ldc.l	@r0+, R4_BANK
	ldc.l	@r0+, R5_BANK
	ldc.l	@r0+, R6_BANK
	ldc.l	@r0+, R7_BANK
	mov.l	@r0+, r8
	mov.l	@r0+, r9
	mov.l	@r0+, r10
	mov.l	@r0+, r11
	mov.l	@r0+, r12
	mov.l	@r0+, r13
	mov.l	@r0+, r14
	mov.l	@r0+, r15
	ldc.l	@r0+, gbr
	lds.l	@r0+, macl
	lds.l	@r0+, mach
	ldc.l	@r0+, ssr
	ldc.l	@r0+, spc
	lds.l	@r0+, pr
scheduler_restart_timer:
	/* Check if the scheduler uses a TMU (gint timer id < 3) or an ETMU;
	   the T bit produced here is consumed by the bf/s below. */
	mov.l	thread_sched_gint_timer_id, r0
	mov.l	@r0, r0
	mov	#3, r1
	cmp/ge	r1, r0
	/* Load the pre-calculated timer register addresses / masks */
	mov.l	thread_sched_tmu_tcr_addr, r0
	mov.l	thread_sched_tmu_tcr_mask, r1
	mov.l	@r0, r0
	mov.l	@r1, r1
	not	r1, r1
	mov.l	thread_sched_tmu_tcor_addr, r2
	mov.l	thread_sched_tmu_tcnt_addr, r3
	mov.l	@r2, r2
	mov.l	@r3, r3
	mov.l	thread_sched_tmu_tstr_addr, r4
	mov.l	thread_sched_tmu_tstr_mask, r5
	mov.l	@r4, r4
	bf/s	tmu_restart_timer
	mov.l	@r5, r5
etmu_restart_timer:
	/* clear interrupt flag (ETMU.TCR.UNF = 0), slow write */
	mov.b	@r0, r6
	and	r1, r6
etmu_tcr_slow_unf_clear:
	mov.b	r6, @r0
	mov.b	@r0, r7
	cmp/eq	r6, r7
	bf	etmu_tcr_slow_unf_clear
	/* reload the timer counter (ETMU.TCNT = ETMU.TCOR), slow write */
	mov.l	@r2, r6
etmu_slow_reload:
	mov.l	r6, @r3
	mov.l	@r3, r7
	cmp/eq	r6, r7
	bf	etmu_slow_reload
	/* Only restart the timer if at least 2 threads are registered */
	mov.l	thread_sched_counter, r0
	mov.l	@r0, r0
	mov	#2, r1
	cmp/ge	r1, r0
	bf	process_switch
	/* Restart timer (ETMU.TSTR = 1), slow write */
	mov.b	@r4, r6
	or	r5, r6
etmu_tstr_slow_restart:
	mov.b	r6, @r4
	mov.b	@r4, r7
	cmp/eq	r6, r7
	bf	etmu_tstr_slow_restart
	bra	process_switch
	nop
tmu_restart_timer:
	/* clear interrupt flag (TMU.TCR.UNF) — TMU writes are fast */
	mov.w	@r0, r6
	and	r1, r6
	mov.w	r6, @r0
	/* reload the timer counter (TMU.TCNT = TMU.TCOR) */
	mov.l	@r2, r6
	mov.l	r6, @r3
	/* Only restart the timer if at least 2 threads are registered */
	mov.l	thread_sched_counter, r0
	mov.l	@r0, r0
	mov	#2, r1
	cmp/ge	r1, r0
	bf	process_switch
	/* Restart timer (TMU.TSTR) */
	mov.b	@r4, r6
	or	r5, r6
	mov.b	r6, @r4
process_switch:
	/* return from the interrupt: PC <- SPC, SR <- SSR */
	rte
	nop
	.align 4
/* kernel information */
thread_kernel_stack:		.long _thread_kernel_stack_start
/* scheduler global information */
thread_sched_current:		.long _thread_sched_current
thread_sched_counter:		.long _thread_sched_counter
/* High-level functions */
thread_schedule:		.long _thread_schedule
#ifdef THREAD_SCHEDULER_DEBUG
thread_schedule_debug:		.long _thread_schedule_debug
#endif
/* Timer hardware pre-calculated information */
thread_sched_gint_timer_id:	.long _thread_sched_gint_timer_id
thread_sched_tmu_interrupt_id:	.long _thread_sched_tmu_interrupt_id
thread_sched_tmu_tcr_addr:	.long _thread_sched_tmu_tcr_addr
thread_sched_tmu_tcr_mask:	.long _thread_sched_tmu_tcr_mask
thread_sched_tmu_tstr_mask:	.long _thread_sched_tmu_tstr_mask
thread_sched_tmu_tstr_addr:	.long _thread_sched_tmu_tstr_addr
thread_sched_tmu_tcor_addr:	.long _thread_sched_tmu_tcor_addr
thread_sched_tmu_tcnt_addr:	.long _thread_sched_tmu_tcnt_addr
/* Other information */
intevt_register:		.long 0xff000028
.global _thread_kernel_yield
.type _thread_kernel_yield, @function
/* thread_kernel_yield(): Cause the calling thread to relinquish the CPU
   Simulates a scheduler interrupt from normal (subroutine) context: blocks
   interrupts, fakes the interrupt register-bank configuration, then jumps
   into the scheduler entry, whose final rte resumes after this call. */
/* TODO preemption ? */
_thread_kernel_yield:
	/* start atomic operation + bank switch: set SR.BL | SR.RB (0x30 << 24) */
	stc	sr, r0
	mov	r0, r1
	mov	#0x30, r2
	shll8	r2
	shll16	r2
	or	r2, r1
	ldc	r1, sr
	/* prepare the fake interrupt return state: SPC = caller's PR so the
	   scheduler's rte resumes at our call site.
	   NOTE(review): SSR is loaded from R0_BANK (a general register), not
	   from a saved SR image — this looks suspicious; confirm. */
	stc	R0_BANK, r0
	sts	pr, r1
	ldc	r0, ssr
	ldc	r1, spc
	/* simulate the interrupt by switching the register bank (SR.RB) */
	mov	#0x20, r1
	shll8	r1
	shll16	r1
	or	r1, r0
	ldc	r0, sr
	/* prepare to jump into thread_kernel_scheduler_entry(): the entry
	   expects r0 = PR snapshot (0 here) and r1 = stack snapshot */
	xor	r0, r0
	mov	r15, r1
	mov.l	1f, r2
	jsr	@r2
	nop
pouet:
	/* safety trap: should never be reached (the scheduler rte's back to
	   the caller); sleep forever if it is */
	sleep
	bra	pouet
	nop
.global _thread_kernel_terminate_trampoline
.type _thread_kernel_terminate_trampoline, @function
/* thread_kernel_terminate_trampoline()
   Invoked when a thread's entry function returns (its return address points
   here): forwards the returned value (r0) to thread_exit() as its first
   argument. Never returns. */
_thread_kernel_terminate_trampoline:
	mov.l	2f, r1
	jmp	@r1
	mov	r0, r4		! delay slot: r4 = thread's return value
	.align 4
1:	.long	_thread_kernel_sched_entry
2:	.long	_thread_exit

238
src/thread/mutex.c Normal file
View File

@ -0,0 +1,238 @@
//---
// gint:thread:mutex - Thread mutex API
//---
#include <gint/thread.h>
#include <gint/timer.h>
#include <gint/std/string.h>
//---
// Internal functions
//---
/* thread_mutex_timer_callback()
   Invoked when a "thread_mutex_timedlock()" timeout is reached. Raises the
   abort flag polled by the waiting loop, then stops the one-shot timer.
   (The field name "abord" is kept as declared in the mutex structure.) */
static int thread_mutex_timer_callback(thread_mutex_t *mutex)
{
	thread_atomic_start();
	mutex->timer.abord = 1;
	thread_atomic_stop();
	return (TIMER_STOP);
}
/* thread_mutex_validity_check(): Check the validity of the mutex
   Validates the mutex watermark, then returns the currently scheduled
   thread, or NULL when either the mutex or the current thread is invalid.
   This function SHOULD be invoked in "atomic" mode. */
static struct thread *thread_mutex_validity_check(thread_mutex_t *mutex)
{
	if (mutex == NULL)
		return (NULL);
	if (mutex->watermark != THREAD_MUTEX_WATERMARK)
		return (NULL);
	/* thread_sched_get_current() already yields NULL when no thread is
	   running, which is exactly our error value */
	return (thread_sched_get_current());
}
//---
// Thread mutex API
//---
/* thread_mutex_init(): Creates a new mutex object with type TYPE
   Accepted types: plain or timed, each optionally combined with recursive.
   Returns thread_mutex_retval_success on success (including when the mutex
   is already initialized), thread_mutex_retval_error otherwise. */
int thread_mutex_init(thread_mutex_t *mutex, int type)
{
	/* bug fix: the original dereferenced mutex->watermark before any
	   NULL check, crashing on thread_mutex_init(NULL, ...) */
	if (mutex == NULL)
		return (thread_mutex_retval_error);
	thread_atomic_start();
	/* NOTE(review): this reads uninitialized memory on a fresh mutex; a
	   garbage value equal to the watermark would falsely report success
	   — confirm callers zero the object first */
	if (mutex->watermark == THREAD_MUTEX_WATERMARK) {
		thread_atomic_stop();
		return (thread_mutex_retval_success);
	}
	switch (type) {
	case thread_mutex_type_plain:
	case thread_mutex_type_timed:
	case thread_mutex_type_timed | thread_mutex_type_recursive:
	case thread_mutex_type_plain | thread_mutex_type_recursive:
		break;
	default:
		thread_atomic_stop();
		return (thread_mutex_retval_error);
	}
	memset(mutex, 0x00, sizeof(thread_mutex_t));
	mutex->watermark = THREAD_MUTEX_WATERMARK;
	mutex->type = type;
	mutex->lock = 0;
	mutex->timer.id = -1;	/* no timed-lock timer allocated */
	mutex->timer.abord = 0;
	mutex->owner = -1;	/* no owning thread */
	thread_atomic_stop();
	return (thread_mutex_retval_success);
}
/* thread_mutex_lock(): Block the current thread until the mutex is locked
   For a recursive mutex already owned by the caller, the lock counter is
   simply incremented. Otherwise the calling thread busy-waits, yielding the
   CPU between polls.
   NOTE(review): the whole wait loop runs inside an atomic section and
   relies on thread_kernel_yield() to let other threads run; the commit log
   reports crashes when several threads contend here — confirm that yield
   correctly re-enables scheduling while the atomic counter is non-zero. */
int thread_mutex_lock(thread_mutex_t *mutex)
{
	struct thread *thread;

	/* Check error */
	thread_atomic_start();
	thread = thread_mutex_validity_check(mutex);
	if (thread == NULL) {
		thread_atomic_stop();
		return (thread_mutex_retval_error);
	}
	/* Check if the mutex is recursive: same owner just nests the lock */
	if ((mutex->type & thread_mutex_type_recursive) != 0) {
		if (mutex->owner == thread->scheduler.id) {
			mutex->lock = mutex->lock + 1;
			thread_atomic_stop();
			return (thread_mutex_retval_success);
		}
	}
	/* Wait until the mutex is locked */
	while (1) {
		/* Check if the mutex is unlocked */
		if (mutex->lock == 0) {
			mutex->owner = thread->scheduler.id;
			mutex->lock = 1;
			break;
		};
		/* Force schedule so the owner gets a chance to unlock */
		thread_kernel_yield();
	}
	/* Lock and return */
	thread_atomic_stop();
	return (thread_mutex_retval_success);
}
/* thread_mutex_timedlock(): Like thread_mutex_lock(), but give up after
   delay_us microseconds. Requires a mutex created with the timed type.
   Returns success, busy (timeout) or error. */
int thread_mutex_timedlock(thread_mutex_t *mutex, uint64_t delay_us)
{
	struct thread *thread;
	int retval;

	/* Check error: valid mutex, running thread, timed-capable type */
	thread_atomic_start();
	thread = thread_mutex_validity_check(mutex);
	if (thread == NULL || (mutex->type & thread_mutex_type_timed) == 0) {
		thread_atomic_stop();
		return (thread_mutex_retval_error);
	}
	/* Check if the mutex is recursive: same owner just nests the lock */
	if (mutex->type & thread_mutex_type_recursive) {
		if (mutex->owner == thread->scheduler.id) {
			mutex->lock = mutex->lock + 1;
			thread_atomic_stop();
			return (thread_mutex_retval_success);
		}
	}
	/* setup the timeout timer; its callback raises mutex->timer.abord */
	mutex->timer.abord = 0;
	mutex->timer.id = timer_setup(TIMER_ANY, delay_us,
			(void*)&thread_mutex_timer_callback, mutex);
	if (mutex->timer.id < 0) {
		thread_atomic_stop();
		return (thread_mutex_retval_error);
	}
	/* Wait until the mutex is unlocked or the timeout fires
	   NOTE(review): like thread_mutex_lock(), this polls inside an
	   atomic section and depends on thread_kernel_yield() — confirm the
	   timer interrupt can actually be delivered here. */
	timer_start(mutex->timer.id);
	while (1) {
		/* Check timeout abort */
		if (mutex->timer.abord == 1) {
			retval = thread_mutex_retval_busy;
			break;
		}
		/* Check if the mutex is unlocked */
		if (mutex->lock == 0) {
			mutex->lock = 1;
			mutex->owner = thread->scheduler.id;
			retval = thread_mutex_retval_success;
			break;
		};
		/* Force schedule */
		thread_kernel_yield();
	}
	/* destroy the timer and return */
	timer_stop(mutex->timer.id);
	mutex->timer.id = -1;
	mutex->timer.abord = -1;
	thread_atomic_stop();
	return (retval);
}
/* thread_mutex_trylock(): Try to lock the mutex without blocking
   Returns success when the lock was taken (or nested, for a recursive
   mutex owned by the caller), busy when the mutex is held elsewhere, and
   error on an invalid mutex or missing current thread. */
int thread_mutex_trylock(thread_mutex_t *mutex)
{
	struct thread *self;
	int retval = thread_mutex_retval_busy;

	thread_atomic_start();
	self = thread_mutex_validity_check(mutex);
	if (self == NULL) {
		thread_atomic_stop();
		return (thread_mutex_retval_error);
	}
	if ((mutex->type & thread_mutex_type_recursive)
			&& mutex->owner == self->scheduler.id) {
		/* recursive nesting by the current owner */
		mutex->lock = mutex->lock + 1;
		retval = thread_mutex_retval_success;
	} else if (mutex->lock == 0) {
		/* free: take ownership */
		mutex->owner = self->scheduler.id;
		mutex->lock = 1;
		retval = thread_mutex_retval_success;
	}
	thread_atomic_stop();
	return (retval);
}
/* thread_mutex_unlock(): Release one level of mutex ownership
   Only the owning thread may unlock, and only when the lock counter is
   non-zero. Returns success or error. */
int thread_mutex_unlock(thread_mutex_t *mutex)
{
	struct thread *self;

	thread_atomic_start();
	self = thread_mutex_validity_check(mutex);
	if (self == NULL || mutex->lock == 0
			|| mutex->owner != self->scheduler.id) {
		thread_atomic_stop();
		return (thread_mutex_retval_error);
	}
	/* one decrement per unlock, matching recursive lock nesting */
	mutex->lock = mutex->lock - 1;
	thread_atomic_stop();
	return (thread_mutex_retval_success);
}
/* thread_mutex_destroy(): Destroy the mutex object
   Invalidates the watermark, clears ownership, and stops the timed-lock
   timer if one is still running. */
void thread_mutex_destroy(thread_mutex_t *mutex)
{
	/* bug fix: the original dereferenced a NULL mutex */
	if (mutex == NULL)
		return;
	thread_atomic_start();
	mutex->watermark = 0x00000000;
	mutex->type = 0;
	mutex->owner = -2;	/* mark as destroyed, distinct from -1 (free) */
	mutex->lock = 0;
	if (mutex->timer.id >= 0)
		timer_stop(mutex->timer.id);
	thread_atomic_stop();
}

327
src/thread/scheduler.c Normal file
View File

@ -0,0 +1,327 @@
//---
// gint:thread:scheduler - Scheduler module
//---
#include <gint/thread.h>
#include <gint/timer.h>
#include <gint/std/stdlib.h>
#include <gint/display.h>
/* define symbols that will be used bu the kernel to communicate with us */
struct thread *thread_sched_queue;
struct thread *thread_sched_current;
uint32_t thread_sched_counter;
thread_t thread_sched_uuid;
/* define symbols used to pre-calculate scheudler timer related information */
uint32_t thread_sched_tmu_interrupt_id;
uint32_t thread_sched_tmu_tcr_addr;
uint32_t thread_sched_tmu_tcr_mask;
uint32_t thread_sched_tmu_tstr_mask;
uint32_t thread_sched_tmu_tstr_addr;
uint32_t thread_sched_tmu_tcor_addr;
uint32_t thread_sched_tmu_tcnt_addr;
int thread_sched_gint_timer_id;
//---
// Driver primitives
//---
/* thread_sched_init(): Initialize the scheduler
   Resets the run queue, locks one gint timer for the scheduler tick
   (THREAD_SCHEDULER_FREQUENCY Hz) and pre-computes the timer's hardware
   register addresses / masks consumed by the assembly scheduler entry. */
void thread_sched_init(void)
{
	struct timer_debug_info info;
	uint64_t delay;

	/* initialize kernel information */
	thread_sched_queue = NULL;
	thread_sched_current = NULL;
	thread_sched_counter = 0;
	thread_sched_uuid = 0;

	/* generate the delay in us (never 0: the timer needs a period) */
	delay = 1000000 / THREAD_SCHEDULER_FREQUENCY;
	if(delay == 0)
		delay = 1;

	/* try to lock one timer; on failure the scheduler is simply left
	   uninitialized (NOTE(review): the error is silent — confirm) */
	thread_sched_gint_timer_id = timer_setup(TIMER_ANY, delay, NULL);
	if (thread_sched_gint_timer_id < 0)
		return;

	/* pre-calculate timer information, used by the
	   "thread_kernel_sched_enty()" kernel function (see
	   <gint/thread/kernel.S> for more information ) */
	timer_debug_get_hw_info(thread_sched_gint_timer_id, &info);
	thread_sched_tmu_tcr_addr = (uintptr_t)info.hardware.address.tcr;
	thread_sched_tmu_tcr_mask = (uintptr_t)info.interrupt.mask.tcr.unf;
	thread_sched_tmu_tstr_addr = (uintptr_t)info.hardware.address.tstr;
	thread_sched_tmu_tstr_mask = (uintptr_t)info.interrupt.mask.tstr.str;
	thread_sched_tmu_tcor_addr = (uintptr_t)info.hardware.address.tcor;
	thread_sched_tmu_tcnt_addr = (uintptr_t)info.hardware.address.tcnt;
	thread_sched_tmu_interrupt_id = (uintptr_t)info.interrupt.id;
}
/* thread_sched_uninit(): Uninitialize the scheduler
   Stops the tick timer and removes every thread from the run queue. */
void thread_sched_uninit(void)
{
	thread_atomic_start();
	thread_sched_stop();
	/* bug fix: the original manually unlinked the head before calling
	   thread_sched_remove(), so the removal never found the thread and
	   the scheduler counter was never decremented. Let
	   thread_sched_remove() do the unlinking itself. */
	while (thread_sched_queue != NULL) {
		//thread_kill(thread_sched_queue, 0);
		thread_sched_remove(thread_sched_queue);
	}
	/* bug fix: the original never closed the atomic section, leaving
	   interrupts masked after shutdown */
	thread_atomic_stop();
}
//---
// User interface
//---
/* thread_sched_start(): Start (or resume) the scheduler's tick timer */
void thread_sched_start(void)
{
	thread_atomic_start();
	timer_start(thread_sched_gint_timer_id);
	thread_atomic_stop();
}
/* thread_sched_stop(): Pause the scheduler's tick timer (counter is kept) */
void thread_sched_stop(void)
{
	thread_atomic_start();
	timer_pause(thread_sched_gint_timer_id);
	thread_atomic_stop();
}
/* thread_sched_add(): Add a thread to the scheduler run queue
   Assigns a fresh identifier, marks the thread runnable and pushes it on
   the head of the singly-linked queue. Always returns 0. */
int thread_sched_add(struct thread *thread)
{
	thread_atomic_start();
	/* assign a unique id and link at the head of the queue */
	thread->scheduler.id = thread_sched_uuid++;
	thread->scheduler.status = THREAD_STATUS_RUNNING;
	thread->scheduler.next = thread_sched_queue;
	thread_sched_queue = thread;
	/* one more runnable thread */
	thread_sched_counter += 1;
	thread_atomic_stop();
	return (0);
}
/* thread_sched_remove(): Remove a thread from the scheduler run queue
   Unlinks the thread, clears its scheduling information and decrements the
   thread counter. Returns 0 on success, -1 when the thread is not queued. */
int thread_sched_remove(struct thread *thread)
{
	struct thread **link;

	thread_atomic_start();
	/* walk the queue through the link pointers so unlinking is uniform
	   for the head and interior nodes */
	for (link = &thread_sched_queue; *link != NULL;
			link = &(*link)->scheduler.next) {
		if (*link != thread)
			continue;
		*link = thread->scheduler.next;
		thread->scheduler.id = -1;
		thread->scheduler.next = NULL;
		thread_sched_counter = thread_sched_counter - 1;
		thread_atomic_stop();
		return (0);
	}
	/* thread was not found in the queue */
	thread_atomic_stop();
	return (-1);
}
/* thread_sched_get_current(): Get the currently scheduled thread
   Returns NULL when no thread has been elected yet. */
struct thread *thread_sched_get_current(void)
{
	return(thread_sched_current);
}
//---
// Kernel interface
//---
/* thread_sched_check(): Check thread validity
   Delivers pending signals and translates the outcome into a scheduling
   verdict.
   @return:
   *  0 can be used
   * -1 cannot be loaded
   * -2 has been removed
*/
static int thread_sched_check(struct thread *thread)
{
	if (thread->scheduler.status != THREAD_STATUS_RUNNING)
		return (-1);
	switch (thread_signals_pending_deliver(thread)) {
	case thread_signals_deliver_retval_running: return (0);
	case thread_signals_deliver_retval_stopped: return (-1);
	case thread_signals_deliver_retval_dead:
	default:
		/* fatal signal: destroy the thread; 0 means it is gone */
		if (thread_terminate(thread) == 0)
			return (-2);
	}
	return (-1);
}
/* thread_schedule(): Schedule the thread queue and return the next thread
   Round-robin election: scan the queue starting after the current thread,
   wrapping at the end, and return the first runnable candidate. Falls back
   to the idle thread when nothing is runnable.
   This function SHOULD NOT be invoked manually; it is meant to be called
   only by the scheduler "handler" (see <gint/thread/kernel.S>). If you know
   what you are doing and want to call it, do so in an "atomic" context. */
struct thread *thread_schedule(void)
{
	struct thread *thread;
	struct thread *next;

	/* empty queue: nothing to schedule */
	if (thread_sched_queue == NULL)
		return (NULL);
	/* no thread currently running: start at the head of the queue */
	if (thread_sched_current == NULL)
		return (thread_sched_queue);
	/* start the scan at the thread after the current one (the original
	   re-checked thread_sched_current == NULL here, which is unreachable
	   after the early return above — removed) */
	next = thread_sched_current->scheduler.next;
	if (next == NULL)
		next = thread_sched_queue;
	do {
		/* Grab the candidate's successor now: thread_sched_check()
		   may destroy the candidate during signal delivery */
		thread = next;
		next = next->scheduler.next;
		if (next == NULL)
			next = thread_sched_queue;
		/* Check the thread validity */
		if (thread_sched_check(thread) == 0)
			return (thread);
	} while (thread != thread_sched_current);
	/* If no thread has been found, load the idle kernel thread which
	   will only wait for the next scheduler timer intervention.
	   (see <gint/thread/thread.c> for more information). */
	return (thread_idle_get());
}
/* thread_sched_find(): Find a queued thread from its identifier
   Returns the thread, or NULL when no queued thread carries this id. */
struct thread *thread_sched_find(thread_t id)
{
	struct thread *node;

	thread_atomic_start();
	for (node = thread_sched_queue; node != NULL;
			node = node->scheduler.next) {
		if (node->scheduler.id == id)
			break;
	}
	thread_atomic_stop();
	/* node is NULL here when the id was not found */
	return (node);
}
/* thread_sched_invalidate(): Invalidate the current thread
   Clears thread_sched_current so the next scheduler tick skips the
   context-saving step (used when the current thread is being destroyed). */
/* TODO: find better way to do the job */
/* TODO: doc */
void thread_sched_invalidate(void)
{
	thread_sched_current = NULL;
}
//---
// Debugging interface
//---
#ifdef THREAD_SCHEDULER_DEBUG
#include <gint/display.h>
int context_switch_counter = 0;
/* thread_schedule_debug(): Debug scheduler, invoked at each context switch
   Development scaffolding only (THREAD_SCHEDULER_DEBUG): receives the
   elected thread from the assembly entry and must return it unchanged.
   NOTE(review): everything after the first "return (thread)" is unreachable
   dead code kept around for manual debugging — intentional leftover. */
struct thread *thread_schedule_debug(struct thread *thread)
{
/*	extern uint32_t thread_tmu_interrupt_id;
	extern uint32_t thread_tmu_tcr_addr;
	extern uint32_t thread_tmu_tcr_mask;
	extern uint32_t thread_tmu_tstr_mask;
	extern uint32_t thread_tmu_tstr_addr;
	dclear(C_WHITE);
	dprint(1, 51, C_BLACK, "next process -> %p", thread);
	dprint(1, 61, C_BLACK, "|--spc: %p", thread->context.cpu.spc);
	dprint(1, 71, C_BLACK, "|--ssr: %p", thread->context.cpu.ssr);
	dprint(1, 81, C_BLACK, "|--r15: %p", thread->context.cpu.reg[15]);
	dprint(1, 91, C_BLACK, "|--@r0: %p", &thread->context.cpu.reg[0]);
	dprint(1, 101, C_BLACK, "|--tmu_event: %#x", thread_tmu_interrupt_id);
	dprint(1, 111, C_BLACK, "|--tmu_tcr_addr: %#x", thread_tmu_tcr_addr);
	dprint(1, 121, C_BLACK, "|--tmu_tcr_mask: %#x", thread_tmu_tcr_mask);
	dprint(1, 131, C_BLACK, "|--tmu_tstr_addr: %#x", thread_tmu_tstr_addr);
	dprint(1, 141, C_BLACK, "`--tmu_tstr_mask: %#x", thread_tmu_tstr_mask);
*/
/*
	struct thread **tmp;
	int idx;
	dclear(C_WHITE);
	dprint(1, 51, C_BLACK, "new process -> %p", thread);
	dtext (1, 61, C_BLACK, "thread queue:");
	idx = 0;
	tmp = &thread_scheduler_queue;
	while (*tmp != NULL) {
		dprint(1, 71 + (10 * idx), C_BLACK, "[%d] - %p", idx, *tmp);
		tmp = &(*tmp)->private.next;
		idx = idx + 1;
	}
	dupdate();
	if (idx != 1)
		while (1);
*/
	/* count context switches for manual inspection */
	context_switch_counter = context_switch_counter + 1;
//	if (context_switch_counter < 3)
		return (thread);
	/* unreachable: timer register dump used during bring-up */
	uint8_t tstr_before = ((uint8_t *)thread_sched_tmu_tstr_addr)[0];
	uint8_t tstr_after = ((uint8_t *)thread_sched_tmu_tstr_addr)[0] & (~thread_sched_tmu_tstr_mask);
	dclear(C_WHITE);
	dprint(1, 11, C_BLACK, "next process -> %p", thread);
	dprint(1, 21, C_BLACK, "TMU -> ID: %#x", (uint8_t *)thread_sched_gint_timer_id);
	dprint(1, 31, C_BLACK, "TMU -> TSTR: %#x", tstr_before);
	dprint(1, 41, C_BLACK, "TMU -> TCOR: %#x", ((uint32_t*)thread_sched_tmu_tcor_addr)[0]);
	dprint(1, 51, C_BLACK, "TMU -> TCNT: %#x", ((uint32_t*)thread_sched_tmu_tcnt_addr)[0]);
	dprint(1, 71, C_BLACK, "TMU -> TSTR %#x", thread_sched_tmu_tstr_mask);
	dprint(1, 81, C_BLACK, "TMU -> TSTR %#x", tstr_after);
	dupdate();
	while (1);
}
#endif

166
src/thread/signals.c Normal file
View File

@ -0,0 +1,166 @@
//---
// gint:thread:signal - Thread Signals management
//---
#include <gint/thread.h>
#include <gint/std/string.h>
//---
// internals functions
//---
/* thread_signals_deliver(): Deliver a signal to a thread
   Dispatches one signal: ignored handlers are skipped, custom handlers get
   the thread's context rewritten so the handler runs next (returning
   through thread_signals_sigreturn), and default actions are applied
   otherwise. Returns a thread_signals_deliver_retval_* verdict. */
static int thread_signals_deliver(struct thread *thread, int sig)
{
	// check if the signal is ignored
	if (thread->signals.handler[sig] == SIG_IGN)
		return (thread_signals_deliver_retval_running);

	// Check custom signal management
	if (thread->signals.handler[sig] != SIG_DFL) {
		/* save current context into the user's stack: reserve at
		   least sizeof(struct cpu_ctx) bytes (the +3 then ">>2<<2"
		   rounds the stack pointer down to 4-byte alignment) */
		thread->context.reg[15] -= sizeof(struct cpu_ctx) + 3;
		thread->context.reg[15] = thread->context.reg[15] >> 2 << 2;
		memcpy((void*)(uintptr_t)thread->context.reg[15],
				&thread->context, sizeof(struct cpu_ctx));
		/* clean context and set the handler
		   NOTE(review): r0..r13 are cleared but r14 is not — confirm
		   whether leaving the frame pointer is intentional */
		for (int i = 0 ; i < 14 ; i = i + 1)
			thread->context.reg[i] = 0x00000000;
		thread->context.gbr = 0x00000000;
		thread->context.macl = 0x00000000;
		thread->context.mach = 0x00000000;
		/* NOTE(review): 0x40000000 as the handler's SR image — confirm
		   which mode bits this encodes */
		thread->context.ssr = 0x40000000;
		thread->context.spc = (uintptr_t)thread->signals.handler[sig];
		/* returning from the handler lands in the sigreturn stub */
		thread->context.pr = (uintptr_t)&thread_signals_sigreturn;
		/* send the signal number as the handler's first argument */
		thread->context.reg[4] = sig;
		return (thread_signals_deliver_retval_running);
	}

	/* default actions */
	switch (sig) {
	/* Stop the current thread (TODO: stop all child ?) */
	case SIGSTOP:
		thread->scheduler.status = THREAD_STATUS_PAUSED;
		return (thread_signals_deliver_retval_stopped);
	/* Wake up the parent */
	case SIGCONT:
		if (thread->private.parent != NULL) {
			thread_signals_raise(
					thread->private.parent, SIGCHLD);
		}
		return (0);
	/* Nothing, just wake up the process */
	case SIGCHLD:
		return (0);
	/* Process termination (default) */
	case SIGTERM:	// termination (TODO: cleanup part !!!)
	case SIGINT:	// interruption
	case SIGHUP:	// hang up
	case SIGKILL:	// kill
	case SIGBUS:	// bus error
	case SIGFPE:	// fatal arithmetic error
	case SIGSEGV:	// segmentation violation
	default:
		return (thread_signals_deliver_retval_dead);
	}
}
//---
// Kernel interface
//---
/* thread_signals_raise(): Raise a signal on a thread.
   Marks `sig` as pending and wakes the thread so the scheduler delivers
   it. Returns 0 on success, -1 if `sig` is out of range or the thread is
   NULL / a zombie. */
int thread_signals_raise(struct thread *thread, int sig)
{
	/* FIX: also reject negative signal numbers — the original only
	   checked the upper bound, so a negative `sig` produced a shift by
	   a negative count (undefined behaviour) on the pending mask. */
	if (sig < 0 || sig >= NSIG)
		return (-1);
	thread_atomic_start();
	if (thread == NULL
			|| thread->scheduler.status == THREAD_STATUS_ZOMBIE) {
		thread_atomic_stop();
		return (-1);
	}
	/* wake the target up so the pending signal gets delivered */
	thread->scheduler.status = THREAD_STATUS_RUNNING;
	thread->signals.pending |= 1 << sig;
	thread_atomic_stop();
	return (0);
}
/* thread_signals_pending_deliver(): Deliver every deliverable pending
   signal on `thread`. Stops early as soon as a delivery changes the
   thread's state (stopped or dead) and returns that delivery code;
   returns 0 when the thread is still running. */
int thread_signals_pending_deliver(struct thread *thread)
{
	sigset_t deliverable;
	sigset_t mask;
	int status = 0;

	thread_atomic_start();

	/* SIGKILL, SIGSTOP and SIGTERM can never be blocked */
	mask = thread->signals.blocking;
	mask &= ~((1 << SIGKILL) | (1 << SIGSTOP) | (1 << SIGTERM));
	deliverable = thread->signals.pending & ~mask;

	/* walk the signal numbers until one delivery stops the thread */
	int sig = 0;
	while (sig < NSIG && status == 0) {
		if ((deliverable & (1 << sig)) != 0) {
			status = thread_signals_deliver(thread, sig);
			thread->signals.pending &= ~(1 << sig);
		}
		sig = sig + 1;
	}

	thread_atomic_stop();
	return (status);
}
/* thread_signals_sigreturn(): Signals return handler.
   Never called directly: thread_signals_deliver() installs this address
   as the handler's return address (pr), so it runs when a user signal
   handler returns. It pops the cpu_ctx that deliver() saved on the user
   stack and re-installs it as the thread's context. */
void thread_signals_sigreturn(void)
{
	struct thread *thread;
	void *stack;
	/* get the current thread */
	thread_atomic_start();
	thread = thread_sched_get_current();
	if (thread == NULL) {
		/* no current thread: nothing to restore, yield forever */
		thread_atomic_stop();
		while (1) { thread_kernel_yield(); };
	}
	/* dump old context: the saved cpu_ctx sits exactly at the current
	   stack pointer, where thread_signals_deliver() wrote it */
	stack = (void*)(uintptr_t)thread->context.reg[15];
	memcpy(&thread->context, stack, sizeof(struct cpu_ctx));
	/* restore stack: pop the 4-byte-aligned saved-context area */
	thread->context.reg[15] += (sizeof(struct cpu_ctx) + 3) >> 2 << 2;
	/* Invalidate the thread to force the scheduler to not save the current
	context otherwise, the restored context will be overwritten. */
	thread_sched_invalidate();
	/* force schedule, on the next schedule, we will return to the job */
	thread_atomic_stop();
	while (1) {
		thread_kernel_yield();
	}
}
/* thread_signals_replace(): Replace the current handler for `signum` on
   `thread` with `handler`. Returns the previous handler, or SIG_ERR when
   `signum` is out of range or the thread is NULL / a zombie. */
void (*thread_signals_replace(struct thread *thread,
		int signum, void (*handler)(int)))(int)
{
	void (*old)(int);

	/* FIX: also reject negative signal numbers — the original only
	   checked the upper bound, so a negative `signum` indexed the
	   handler[] array out of bounds. */
	if (signum < 0 || signum >= NSIG)
		return (SIG_ERR);
	thread_atomic_start();
	if (thread == NULL
			|| thread->scheduler.status == THREAD_STATUS_ZOMBIE) {
		thread_atomic_stop();
		return (SIG_ERR);
	}
	old = thread->signals.handler[signum];
	thread->signals.handler[signum] = handler;
	thread_atomic_stop();
	return (old);
}

286
src/thread/thread.c Normal file
View File

@ -0,0 +1,286 @@
//---
// gint:thread:thread - Thread management
//---
#include <gint/thread.h>
#include <gint/timer.h>
#include <gint/drivers.h>
#include <gint/std/setjmp.h>
#include <gint/std/stdlib.h>
#include <gint/std/string.h>
#include <gint/display.h>
#undef thread_create
//---
// Internal thread actions
//---
#if 0
/* thread_kill_child()
   If the THREAD_ATTR_MAIN_THREAD is set, this function is involved when the
   thread die. This function will kill all child thread generated by the parent
*/
/* TODO */
/* NOTE(review): dead code (#if 0). As written, thread_atomic_stop() is
   called without a matching thread_atomic_start() — the start is inside
   the commented-out region below. Rebalance before re-enabling. */
static void thread_kill_child(struct thread **thread)
{
	/* walking: post-order traversal of the child/sibling tree */
	if (*thread == NULL)
		return;
	thread_kill_child(&(*thread)->private.child);
	thread_kill_child(&(*thread)->private.sibling);
	/* destroy the thread */
	/*thread_atomic_start();
	thread_kill(*thread, 0);
	thread_sched_remove(*thread);
	*thread = NULL;*/
	thread_atomic_stop();
}
#endif
//---
// User thread interface
//---
/* thread_create(): Create a new thread.
   `tid` receives the scheduler id of the new thread, `attr` (may be NULL)
   configures the thread behaviour, `function` is the entry point. The
   variadic list carries the entry point's arguments and must be closed
   by THREAD_CREATE_ARG_END_WATERMARK (presumably appended by the macro
   removed by the #undef above — TODO confirm).
   Returns 0 on success, -1 on allocation failure, and
   THREAD_CREATE_LONGJMP_RETURN when re-entered through the longjmp()
   attribute feature. */
int thread_create(thread_t *tid, thread_attr_t *attr, void *function, ...)
{
	struct thread *thread;
	struct thread *parent;
	uint32_t argbuf[16];
	int arg_counter;
	uintptr_t arg;
	va_list ap;
	/* Start the scheduler to avoid small freeze with the first thread */
	thread_atomic_start();
	thread_sched_start();
	/* create the new thread */
	thread = (struct thread *)calloc(1, sizeof(struct thread));
	if (thread == NULL) {
		thread_atomic_stop();
		return (-1);
	}
	/* +4 leaves room for the 4-byte alignment adjustment below */
	thread->private.stack = (uintptr_t)malloc(THREAD_STACK_SIZE + 4);
	if (thread->private.stack == 0x00000000) {
		free(thread);
		thread_atomic_stop();
		return (-1);
	}
	/* r15 (stack pointer) starts at the aligned top of the new stack */
	thread->context.reg[15] = ((thread->private.stack + 3) >> 2 << 2);
	thread->context.reg[15] += ((THREAD_STACK_SIZE + 3) >> 2 << 2);
	/* returning from `function` falls into the termination trampoline */
	thread->context.pr = (uintptr_t)thread_kernel_terminate_trampoline;
	thread->context.spc = (uintptr_t)function;
	thread->context.ssr = 0x40000000;
	/* dump arguments: the first four go in r4..r7, the rest are
	   spilled onto the new thread's stack */
	va_start(ap, function);
	for (arg_counter = 0; arg_counter <= 16; ++arg_counter) {
		arg = va_arg(ap, uint32_t);
		if (arg == THREAD_CREATE_ARG_END_WATERMARK)
			break;
		if (arg_counter < 4) {
			thread->context.reg[arg_counter + 4] = arg;
		} else {
			argbuf[arg_counter - 4] = arg;
		}
	}
	va_end(ap);
	if (arg_counter >= 4) {
		arg_counter -= 4;
		thread->context.reg[15] -= arg_counter << 2;
		memcpy((void*)(uintptr_t)thread->context.reg[15],
			argbuf, arg_counter << 2);
	}
	/* dump attribute if needed */
	if (attr != NULL) {
		memcpy(&thread->attr, attr, sizeof(struct thread_attr_s));
		/* check longjmp() feature.
		   NOTE(review): the jmp_buf saved here captures
		   thread_create()'s own stack frame; longjmp()ing to it
		   after thread_create() has returned is undefined
		   behaviour — this matches the known crash described in
		   the commit log. */
		if (attr->state.longjmp == THREAD_ATTR_LONGJMP_ENABLE) {
			if (setjmp(thread->private.jmpbuf) != 0) {
				thread_atomic_stop();
				return (THREAD_CREATE_LONGJMP_RETURN);
			}
		}
		/* check joinable state: link the new thread into its
		   parent's child list (used for SIGCHLD delivery) */
		if (attr->state.detach == THREAD_ATTR_JOINABLE) {
			parent = thread_sched_get_current();
			if (parent != NULL) {
				thread->private.parent = parent;
				thread->private.sibling = parent->private.child;
				parent->private.child = thread;
			}
		}
	}
	/* link the thread to the scheduler */
	thread_sched_add(thread);
	*tid = thread->scheduler.id;
	/* initialize signals: default disposition, nothing pending */
	for (int i = 0; i < NSIG; ++i)
		thread->signals.handler[i] = SIG_DFL;
	thread->signals.pending = 0x00000000;
	thread->signals.blocking = 0x00000000;
	thread_atomic_stop();
	return (0);
}
/* thread_tryjoin(): Non-blocking join.
   Same contract as thread_join() except that it returns immediately:
   -1 when no thread with this id exists, -2 when the thread exists but
   is not a zombie yet, 0 when the zombie was reaped (its return value,
   if requested, is stored through `retval`). */
int thread_tryjoin(thread_t id, void **retval)
{
	struct thread *target;
	int status = -1;

	thread_atomic_start();
	target = thread_sched_find(id);
	if (target != NULL) {
		if (target->scheduler.status != THREAD_STATUS_ZOMBIE) {
			/* still alive: caller must retry later */
			status = -2;
		} else {
			/* reap the zombie: hand back its return value,
			   then release the thread object */
			if (retval != NULL)
				*retval = (void*)((uintptr_t)target->private.ret);
			thread_sched_remove(target);
			free(target);
			status = 0;
		}
	}
	thread_atomic_stop();
	return (status);
}
/* thread_join(): Wait for a thread's termination.
   Blocks (yielding) until the thread becomes a zombie and is reaped.
   Returns 0 on success; -1 when no thread with this id exists.
   FIX: the original looped on any non-zero tryjoin result, so joining a
   nonexistent (or already-reaped / detached) thread id spun forever. */
int thread_join(thread_t id, void **retval)
{
	int status;

	while (1) {
		status = thread_tryjoin(id, retval);
		if (status == 0)
			return (0);
		/* -1 means "no such thread": waiting can never succeed */
		if (status == -1)
			return (-1);
		/* -2: thread still running, let it make progress */
		thread_kernel_yield();
	}
}
/* thread_exit(): Terminate the calling thread.
   `retval` is stored so a joiner can collect it through thread_join().
   Does not return: raises SIGTERM on the calling thread then yields
   until the scheduler reaps it. */
void thread_exit(void *retval)
{
	struct thread *thread;
	/* get the current thread */
	thread_atomic_start();
	thread = thread_sched_get_current();
	if (thread == NULL) {
		/* not running under the scheduler: nothing to terminate */
		thread_atomic_stop();
		while (1) { thread_kernel_yield(); };
	}
	/* Check thread longjmp() attribute feature: jump back into the
	   setjmp() made in thread_create() instead of dying.
	   NOTE(review): that frame may no longer exist — see the known
	   crash described in the commit log. */
	if (thread->attr.private.watermark == THREAD_ATTR_WATERMARK
		&& thread->attr.state.longjmp == THREAD_ATTR_LONGJMP_ENABLE) {
		longjmp(thread->private.jmpbuf, 1);
	}
	/* block all signals; SIGTERM stays deliverable because the
	   pending-delivery path never allows it to be blocked */
	thread->signals.blocking = -1;
	thread->private.ret = (uintptr_t)retval;
	thread_signals_raise(thread, SIGTERM);
	thread_atomic_stop();
	/* Wait and yield until the scheduler delivers SIGTERM */
	while (1) {
		thread_kernel_yield();
	}
}
/* thread_kill(): Send a signal to the thread identified by `id`.
   Returns the result of thread_signals_raise(), or -1 when no thread
   with this id exists. */
/* TODO: hierarchical handling ! */
int thread_kill(thread_t id, int sig)
{
	struct thread *target;
	int status = -1;

	thread_atomic_start();
	target = thread_sched_find(id);
	if (target != NULL)
		status = thread_signals_raise(target, sig);
	thread_atomic_stop();
	return (status);
}
/* thread_signal(): Set the disposition of `signum` to `handler` for the
   calling thread. Returns the previous handler, or SIG_ERR when there is
   no current thread or the replacement fails. */
void (*thread_signal(int signum, void (*handler)(int)))(int)
{
	struct thread *self;
	void (*previous)(int) = SIG_ERR;

	thread_atomic_start();
	self = thread_sched_get_current();
	if (self != NULL)
		previous = thread_signals_replace(self, signum, handler);
	thread_atomic_stop();
	return (previous);
}
//---
// Kernel interface
//---
/* thread_terminate(): Release a dying thread's resources.
   Returns 0 when the thread object was destroyed (detached thread),
   1 when it was turned into a zombie awaiting thread_tryjoin(). */
int thread_terminate(struct thread *thread)
{
	/* Free the stack and clear the pointer.
	   FIX: the original left the freed pointer in place, so a second
	   call on the same thread would double-free the stack. */
	if (thread->private.stack != 0x00000000) {
		free((void*)thread->private.stack);
		thread->private.stack = 0x00000000;
	}
	/* detached thread: nobody will ever join it, destroy it now */
	if (thread->attr.private.watermark == THREAD_ATTR_WATERMARK
			&& thread->attr.state.detach == THREAD_ATTR_DETACHED) {
		thread_sched_remove(thread);
		free(thread);
		return (0);
	}
	/* the thread is joinable, wait until someone read its ret value
	   (the thread will be destroyed in the "thread_tryjoin()" function ) */
	thread->scheduler.status = THREAD_STATUS_ZOMBIE;
	return (1);
}
//---
// Driver part
//---
/* init(): Driver entry point — set up the scheduler and idle thread */
static void init(void)
{
	/* initialize the scheduler */
	thread_sched_init();
	/* initialize the idle thread (runs when no other thread is ready
	   — TODO confirm against thread_idle_init() documentation) */
	thread_idle_init();
}
/* The thread scheduler is considered a driver: gint initializes it with
   the other drivers at startup. */
gint_driver_t drv_thread = {
	.name = "THREAD",
	.init = init
};
/* priority level 4 — presumably ordered after the core drivers it
   depends on; TODO confirm against GINT_DECLARE_DRIVER() documentation */
GINT_DECLARE_DRIVER(4, drv_thread);

View File

@ -304,6 +304,53 @@ int timer_timeout(void volatile *arg)
return TIMER_STOP;
}
//---
// Debugging function
//---
/* timer_debug_get_hw_info(): Get hardware information for a timer.
   Fills `info` with the register addresses, current register values and
   interrupt metadata of timer `id` (TMU for id 0..2, ETMU above).
   Returns 0 on success, -1 on invalid argument, -2 if the timer is not
   installed. */
int timer_debug_get_hw_info(int id, struct timer_debug_info *info)
{
	uint16_t etmu_event[6] = { 0x9e0, 0xc20, 0xc40, 0x900, 0xd00, 0xfa0 };
	/* Argument check.
	   FIX: the original used `&&`, which dereferenced a NULL `info`
	   whenever `id` was valid and accepted an out-of-range id whenever
	   `info` was non-NULL; also reject negative ids (array index). */
	if (info == NULL || id < 0 || id >= timer_count())
		return (-1);
	/* The timer should be installed... */
	if(!timers[id])
		return (-2);
	/* check timer hardware type (ETMU or TMU) */
	if(id < 3) {
		tmu_t *T = &TMU[id];
		info->hardware.address.tstr = (void *)TSTR;
		info->hardware.address.tcor = (void *)&T->TCOR;
		info->hardware.address.tcnt = (void *)&T->TCNT;
		info->hardware.address.tcr = (void *)&T->TCR;
		info->hardware.context.tcor = T->TCOR;
		info->hardware.context.tcnt = T->TCNT;
		info->hardware.context.tcr = T->TCR.word;
		info->interrupt.mask.tcr.unf = 0x0100;
		info->interrupt.mask.tcr.unie = 0x0020;
		info->interrupt.mask.tstr.str = 1 << id;
		info->interrupt.id = 0x400 + (0x20 * id);
	} else {
		etmu_t *T = &ETMU[id - 3];
		info->hardware.address.tstr = (void *)&T->TSTR;
		info->hardware.address.tcor = (void *)&T->TCOR;
		info->hardware.address.tcnt = (void *)&T->TCNT;
		info->hardware.address.tcr = (void *)&T->TCR;
		info->hardware.context.tcor = T->TCOR;
		info->hardware.context.tcnt = T->TCNT;
		info->hardware.context.tcr = T->TCR.byte;
		info->interrupt.mask.tcr.unf = 0x0002;
		info->interrupt.mask.tcr.unie = 0x0001;
		info->interrupt.mask.tstr.str = 0x01;
		info->interrupt.id = etmu_event[id - 3];
	}
	return (0);
}
//---
// Driver initialization
//---