* add (real) scheduler interface
* add signals interface
* add idle thread interface (prototype)
* add thread attribute interface (thread behaviour configuration)
* add (real) kernel yield function
* add fx9860 support.
* add kernel stack, used in the kernel part.
* update kernel part
* fix scheduler timer stop/reload error.
* fix mutex deadlock
* fix setjmp / longjmp functions

Known issue:
    If a thread uses the "longjmp()" feature, a crash will occur, not during
    the non-local goto, but when the first "return" is involved. I don't know
    exactly why this happens, but I think it is because I use
    "thread_atomic_start()" many times, and if an exception occurs during
    this part, a crash will occur.

    Currently, the refactoring is (probably) coming to an end and the thread
    management is pretty stable on a simple project, which is why I am not
    fixing this issue now.
This commit is contained in:
Yatis 2021-01-03 11:46:21 +01:00
parent 2c5cef3015
commit 6399ed6f40
13 changed files with 1308 additions and 403 deletions

@ -185,6 +185,13 @@ SECTIONS
*(.ilram)
/* Code that must remain mapped is placed here */
*(.gint.mapped)
/* thread kernel stack */
_thread_kernel_stack_end = . ;
. = . + 2048 ;
_thread_kernel_stack_start = . ;
. = ALIGN(16);
} > ilram AT> rom
@ -222,13 +229,13 @@ SECTIONS
/* Code that must remain permanently mapped (.gint.mapped); relocated
to start of user RAM at startup, accessed through P1 */
.gint.mapped ALIGN(4) : ALIGN(4) {
/*.gint.mapped ALIGN(4) : ALIGN(4) {
_lgmapped = LOADADDR(.gint.mapped);
*(.gint.mapped)
. = ALIGN(16);
} > rram AT> rom
_sgmapped = SIZEOF(.gint.mapped);
_sgmapped = SIZEOF(.gint.mapped);*/

@ -147,6 +147,10 @@ SECTIONS
/* Code that must remain mapped is placed here */
*(.gint.mapped)
/* thread kernel stack */
_thread_kernel_stack_end = . ;
. = . + 2048 ;
_thread_kernel_stack_start = . ;
. = ALIGN(16);
} > ilram AT> rom

@ -5,6 +5,9 @@
#include <stdint.h>
#include <stdarg.h>
/* define the jmp_buf type */
#include <gint/std/setjmp.h>
/* Define the default context switching frequency */
#ifndef THREAD_SCHEDULER_FREQUENCY
# define THREAD_SCHEDULER_FREQUENCY 16
@ -16,10 +19,165 @@
#endif
/* Define the default kernel idle thread's stack size */
#ifndef THREAD_KERNEL_IDLE_STACK_SIZE
# define THREAD_KERNEL_IDLE_STACK_SIZE 32
#ifndef THREAD_IDLE_STACK_SIZE
# define THREAD_IDLE_STACK_SIZE 32
#endif
//---
// Thread attribute interface
//---
/* define the thread attribute watermark, used to check that the attribute
   object is valid and initialized */
#define THREAD_ATTR_WATERMARK (~0xdeadbeef)
/* define the fundamental data type and alias for thread attribute */
struct thread_attr_s {
struct {
enum {
THREAD_ATTR_JOINABLE = 0,
THREAD_ATTR_DETACHED = 1,
} detach;
enum {
THREAD_ATTR_LONGJMP_DISABLE = 0,
THREAD_ATTR_LONGJMP_ENABLE = 1,
} longjmp;
} state;
struct {
uint32_t watermark;
} private;
};
typedef struct thread_attr_s thread_attr_t;
/* thread_attr_init(): Initialize thread attribute object
This function initializes the thread attributes object pointed to by ATTR
with default attribute values. After this call, individual attributes of the
object can be set using various related functions formatted like
"thread_attr_*()" and then the object can be used in one or more
"thread_create()" calls that create threads.
@return
* negative value if ATTR is NULL
* 0 if success */
extern int thread_attr_init(thread_attr_t *attr);
/* thread_attr_setdetachstate() and thread_attr_getdetachstate()
The "thread_attr_setdetachstate()" function sets the detach state attribute
of the thread attributes object referred to by attr to the value specified
in detachstate.
The detach state attribute determines whether a thread created using the
thread attributes object attr will be created in a joinable or a detached
state.
The following values may be specified in detachstate:
- THREAD_ATTR_DETACHED
Threads that are created using ATTR will be created in a detached state.
- THREAD_ATTR_JOINABLE
Threads that are created using attr will be created in a joinable state.
The default setting of the detach state attribute in a newly initialized
thread attributes object is THREAD_ATTR_JOINABLE.
The "thread_attr_getdetachstate()" returns the detach state attribute of
the thread attributes object attr in the buffer pointed to by detachstate.
@return
* negative value if ATTR is NULL or uninitialized
* 0 if success */
extern int thread_attr_setdetachstate(thread_attr_t *attr, int detachstate);
extern int thread_attr_getdetachstate(thread_attr_t *attr, int *detachstate);
/* thread_attr_enablelongjmp() and thread_attr_getlongjmpstatus()
This part is CUSTOM to the Gint kernel. When a thread with this attribute
dies, instead of having its memory released and being removed from the
scheduler, it will perform a "longjmp()" and return ("rewind") to the
"thread_create()" function, which will return the special
THREAD_CREATE_LONGJMP_RETURN value, and all child threads of the thread
will be killed (using signals).
In the case of Gint, this feature is very useful because the "main" function
can be threaded and we can return to the "dark side" of the kernel to manage
the destructors or to call the main function again, all while allowing
(potential) threaded drivers to continue working while being isolated from
the Gint "bootstrap" part.
The following values may be specified in "status":
- THREAD_ATTR_LONGJMP_ENABLE
Enable the longjmp() when the thread comes to its end.
- THREAD_ATTR_LONGJMP_DISABLE
Disable the longjmp() when the thread comes to its end.
@return
* negative value if ATTR is NULL or uninitialized
* 0 if success */
extern int thread_attr_enablelongjmp(thread_attr_t *attr, int status);
extern int thread_attr_getlongjmpstatus(thread_attr_t *attr, int *status);
/* thread_attr_destroy(): Destroy thread attribute object
When a thread attributes object is no longer required, it should be destroyed
using the "thread_attr_destroy()" function. Destroying a thread attributes
object has no effect on threads that were created using that object.
Once a thread attributes object has been destroyed, it can be reinitialized
using "thread_attr_init()". Any other use of a destroyed thread attributes
object has undefined results. */
extern int thread_attr_destroy(thread_attr_t *attr);
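/* A minimal usage sketch of the attribute interface above (illustrative
   only; assumes nothing beyond the functions declared here, error checking
   elided):

	thread_attr_t attr;

	thread_attr_init(&attr);
	thread_attr_setdetachstate(&attr, THREAD_ATTR_DETACHED);
	thread_attr_enablelongjmp(&attr, THREAD_ATTR_LONGJMP_ENABLE);
	// ...pass &attr to one or more thread_create() calls...
	thread_attr_destroy(&attr);

   Destroying the object has no effect on threads already created with it. */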
//---
// Signals interface
//---
/* Define thread signal set type */
typedef uint32_t sigset_t;
/* Define signals handler type */
typedef void (*sighandler_t)(int);
/* Define fake signal functions. */
#define SIG_ERR ((sighandler_t) -1) /* Error return. */
#define SIG_DFL ((sighandler_t) 0) /* Default action. */
#define SIG_IGN ((sighandler_t) 1) /* Ignore signal. */
/* Define the number of signals available */
#define NSIG 19
/* Define all signal numbers
(note: not all signals are currently supported, but they will be in the near
future) */
#define SIGKILL 0 /* (unblockable) Killed */
#define SIGSTOP 1 /* (unblockable) Stop */
#define SIGTERM 2 /* (unblockable) Termination request. */
#define SIGCONT 3 /* Continue. */
#define SIGTRAP 4 /* Trace/breakpoint trap. */
#define SIGILL 5 /* Illegal instruction. */
#define SIGTSTP 6 /* Keyboard stop. */
#define SIGABRT 7 /* Abnormal termination. */
#define SIGCHLD 8 /* Child terminated or stopped. */
#define SIGPOLL 9 /* Pollable event occurred (System V). */
#define SIGVTALRM 10 /* Virtual timer expired. */
#define SIGPROF 11 /* Profiling timer expired. */
#define SIGUSR1 12 /* User-defined signal 1. */
#define SIGUSR2 13 /* User-defined signal 2. */
#define SIGHUP 14 /* Hang up. */
#define SIGINT 15 /* Interruption. */
#define SIGBUS 16 /* Bus error. */
#define SIGFPE 17 /* Fatal arithmetic error. */
#define SIGSEGV 18 /* Segmentation violation. */
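/* A short sketch of how these definitions combine (hypothetical handler
   name; thread_signal() and thread_kill() are declared further below):

	static void on_usr1(int sig)
	{
		(void)sig;	// react to SIGUSR1, then return
	}

	thread_signal(SIGUSR1, &on_usr1);	// install a custom handler
	thread_kill(tid, SIGUSR1);		// raise SIGUSR1 on thread `tid`

   Passing SIG_DFL restores the default action and SIG_IGN ignores the
   signal, as with the standard C signal() interface. */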
//---
// User thread interface
//---
/* define the special return value used by the thread_create() function */
#define THREAD_CREATE_LONGJMP_RETURN (0xd1ceca5e)
/* Define thread ID alias */
typedef uint32_t thread_t;
@ -41,78 +199,105 @@ struct cpu_ctx
//TODO: signals mask !
struct thread {
/* hardware context */
struct cpu_ctx context;
/* thread configuration */
struct thread_attr_s attr;
/* signals information */
struct {
struct cpu_ctx cpu;
} context;
sighandler_t handler[NSIG];
sigset_t pending;
sigset_t blocking;
} signals;
/* thread status */
enum {
THREAD_STATUS_STOPPED, /* thread initialized but not started */
THREAD_STATUS_RUNNING, /* thread started */
THREAD_STATUD_PAUSED, /* thread paused (not implemented) */
THREAD_STATUS_ZOMBIE, /* thread terminated but not removed */
} status;
/* thread scheduler information */
struct {
/* thread status */
enum {
THREAD_STATUS_PAUSED = 0,
THREAD_STATUS_RUNNING = 1,
THREAD_STATUS_ZOMBIE = 2,
THREAD_STATUS_DEAD = 3
} status;
/* private information (used by the scheduler) */
/* thread identifier */
thread_t id;
/* hierarchical information */
struct thread *next; /* potential next process */
} scheduler;
/* private information */
struct {
uintptr_t stack; /* original stack address */
thread_t id; /* thread ID */
void *ret; /* saved exited value */
struct thread *next; /* potential next process */
uintptr_t ret; /* saved exit value */
jmp_buf jmpbuf; /* longjmp feature */
struct thread *sibling; /* sibling thread list */
struct thread *child; /* child thread list */
struct thread *parent; /* thread parent address */
} private;
};
//---
// User thread interface
//---
/* thread_create(): Create a new thread */
extern int thread_create(thread_t *thread, int nb_arg, void *function, ...);
extern int thread_create(thread_t *thread,
thread_attr_t *attr, void *function, ...);
/* thread_join(): Waits for the thread specified by thread to terminate */
/* Make sure an argument is always provided, used by va_arg() to detect the
last argument sent by the user */
#define THREAD_CREATE_ARG_END_WATERMARK (0xdeb0cad0)
#define thread_create(...) thread_create(__VA_ARGS__,\
THREAD_CREATE_ARG_END_WATERMARK)
/* thread_join(): Waits for the thread specified by thread to terminate
This function waits for the thread specified by thread to terminate.
If that thread has already terminated, then "thread_join()" returns
immediately. The thread specified by THREAD must be joinable.
If RETVAL is not NULL, then "thread_join()" copies the exit status of the
target thread (i.e., the value that the target thread supplied to
"thread_exit()") into the location pointed to by RETVAL. If the target thread
was canceled, then THREAD_CANCELED is placed in the location pointed to by
RETVAL.
If multiple threads simultaneously try to join with the same thread, the
results are undefined. If the thread calling "thread_join()" is canceled,
then the target thread will remain joinable (i.e., it will not be detached).
*/
extern int thread_join(thread_t thread, void **retval);
extern int thread_tryjoin(thread_t thread, void **retval);
/* thread_exit(): Terminate the calling thread */
/* thread_exit(): Terminate the calling thread
This function terminates the calling thread and returns a value via
retval that (if the thread is joinable) is available to another thread in the
same process that calls "thread_join()".
Any clean-up handlers established by pthread_cleanup_push(3) that have not
yet been popped, are popped (in the reverse of the order in which they were
pushed) and executed. If the thread has any thread-specific data, then, after
the clean-up handlers have been executed, the corresponding destructor
functions are called, in an unspecified order.
When a thread terminates, process-shared resources (e.g., mutexes, condition
variables, semaphores, and file descriptors) are not released. */
extern void thread_exit(void *retval);
/* thread_yield(): Cause the calling thread to relinquish the CPU */
extern void thread_yield(void);
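/* A worked sketch of the create / join / exit cycle (hypothetical worker
   function; relies on the variadic sentinel appended by the thread_create()
   macro above and on the default joinable behaviour):

	static void worker(int a, int b)
	{
		thread_exit((void *)(uintptr_t)(a + b));	// never returns
	}

	thread_t tid;
	void *sum;

	if (thread_create(&tid, NULL, &worker, 1, 2) == 0) {
		thread_join(tid, &sum);		// blocks until worker exits
		// sum now holds (void *)3
	}
*/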
/* thread_kill(): Kill a thread
This function will destroy a thread without regard for its attributes. The
thread SHOULD NEVER be used again after this function.
This function is used by the scheduler to destroy a thread from its queue
before removing it. */
extern int thread_kill(thread_t thread, int sig);
/* TODO doc */
extern void (*thread_signal(int signum, void (*handler)(int)))(int);
//---
// Thread atomic operation
//---
/* thread_atomic_start(): Start atomic operation
This function will block interrupts and exceptions until
"thread_atomic_stop()" is called. This is really useful when you need to
secure some tricky part of code (like a kernel-level driver implementation).
But be careful: the code executed after this function SHOULD be
EXCEPTION-SAFE! Otherwise, a crash will occur and Gint can do nothing to
avoid it because this is hardware-specific. If you need to secure shared
data, use a mutex instead.
This implementation is recursion-safe and will return:
* the SR value when you enter the "atomic" operation (first call)
* 0 if you are already in an "atomic" operation (subsequent calls)
To return to "normal" operation, you should call "thread_atomic_stop()"
as many times as you have called "thread_atomic_start()". */
extern uint32_t thread_atomic_start(void);
/* thread_atomic_stop(): Stop atomic operation
This function will try to return to the "normal" mode and will return:
* a negative value if an error occurs
* 0 if you are still in "atomic" mode
* the restored SR value if you have returned to the "classic" mode */
extern uint32_t thread_atomic_stop(void);
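/* A sketch of the recursive protocol described above (illustrative only;
   the actual SR values are whatever the hardware held at the first call):

	uint32_t sr = thread_atomic_start();	// first call: returns saved SR
	thread_atomic_start();			// nested call: returns 0
	// ...exception-safe critical section...
	thread_atomic_stop();			// still atomic: returns 0
	thread_atomic_stop();			// last call: returns restored SR
*/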
/* KERNEL-ONLY TODO Doc */
extern int thread_terminate(struct thread *thread);
@ -260,8 +445,56 @@ extern void thread_mutex_destroy(thread_mutex_t *mutex);
//---
// Thread signals management
// Thread atomic operation
//---
/* thread_atomic_start(): Start atomic operation
This function will block interrupts and exceptions until
"thread_atomic_stop()" is called. This is really useful when you need to
secure some tricky part of code (like a kernel-level driver implementation).
But be careful: the code executed after this function SHOULD be
EXCEPTION-SAFE! Otherwise, a crash will occur and Gint can do nothing to
avoid it because this is hardware-specific. If you need to secure shared
data, use a mutex instead.
This implementation is recursion-safe and will return:
* the SR value when you enter the "atomic" operation (first call)
* 0 if you are already in an "atomic" operation (subsequent calls)
To return to "normal" operation, you should call "thread_atomic_stop()"
as many times as you have called "thread_atomic_start()". */
extern uint32_t thread_atomic_start(void);
/* thread_atomic_stop(): Stop atomic operation
This function will try to return to the "normal" mode and will return:
* a negative value if an error occurs
* 0 if you are still in "atomic" mode
* the restored SR value if you have returned to the "classic" mode */
extern uint32_t thread_atomic_stop(void);
//---
// Signals management (kernel)
//---
enum {
thread_signals_deliver_retval_running = 0,
thread_signals_deliver_retval_stopped = 1,
thread_signals_deliver_retval_dead = 2,
};
/* Macros for constructing status values. */
#define __W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
#define __W_STOPCODE(sig) ((sig) << 8 | 0x7f)
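/* For example, __W_EXITCODE(1, SIGTERM) packs the exit code in the high byte
   and the terminating signal in the low byte: (1 << 8) | 2 == 0x0102, since
   SIGTERM is 2 here. This mirrors the classic wait(2) status layout. */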
/* thread_signals_raise(): Raise a signal
This function will raise a signal for a given thread. This function is used
to raise kernel-based signals like SIGKILL. */
extern int thread_signals_raise(struct thread *thread, int sig);
/* thread_signals_deliver_pending(): Deliver all pending signals
This function is KERNEL-ONLY and SHOULD NEVER be called because it is
@ -269,17 +502,52 @@ extern void thread_mutex_destroy(thread_mutex_t *mutex);
<gint/thread/scheduler.c> for more information). */
extern int thread_signals_pending_deliver(struct thread *thread);
/* thread_signals_replace(): TODO */
extern void (*thread_signals_replace(struct thread *thread,
int signum, void (*handler)(int)))(int);
/* thread_signals_sigreturn(): TODO */
extern void thread_signals_sigreturn(void);
//---
// Idle thread interface
//---
/* thread_idle_init(): Initialize the idle thread */
extern void thread_idle_init(void);
extern void thread_idle_uninit(void);
/* thread_idle_get(): Return the idle thread address */
extern struct thread *thread_idle_get(void);
//---
// Scheduler interface
//---
extern void thread_sched_init(void);
extern void thread_sched_uninit(void);
/* thread_sched_start(): Start the scheduler timer */
extern void thread_sched_start(void);
extern void thread_sched_stop(void);
extern int thread_sched_add(struct thread *thread);
extern int thread_sched_remove(struct thread *thread);
extern struct thread *thread_sched_find(thread_t thread);
extern struct thread *thread_sched_get_current(void);
extern int thread_sched_get_counter(void);
extern void thread_sched_invalidate(void);
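/* A sketch of the expected kernel-side call order (as wired up by the driver
   init code in <src/thread/thread.c>; illustrative only):

	thread_sched_init();	// lock a timer, pre-calculate TMU information
	thread_idle_init();	// prepare the fallback idle thread
	thread_sched_start();	// start the scheduler timer
	// ...threads come and go via thread_sched_add() / thread_sched_remove()...
	thread_sched_stop();
	thread_sched_uninit();
*/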
//---
// Kernel thread interface
//---
/* thread_kernel_idle_code(): Idle code used by the idle thread
The idle thread runs when none of the registered threads is able to run.
This dummy (for now) thread will just wait until the next Gint event. */
extern void thread_kernel_idle_code(void);
/* thread_kernel_terminate_trampoline(): Termination trampoline code.
@ -288,10 +556,10 @@ extern void thread_kernel_idle_code(void);
SHOULD NOT be called. */
extern void thread_kernel_terminate_trampoline(void);
/* thread_kernel_get_current(): Get current thread object address */
extern struct thread *thread_kernel_get_current(void);
/* thread_kernel_yield(): Cause the calling thread to relinquish the CPU */
extern void thread_kernel_yield(void);
/* thread_kernel_exit(): Terminate the calling thread */
extern void thread_kernel_exit(void *retval);
#endif /* GINT_THREAD */

@ -120,7 +120,7 @@ _gint_inth_7305:
.zero 2
/* information */
1: .long _thread_kernel_sched_procedure
1: .long _thread_kernel_sched_interrupt_procedure
2: .long 0xff000028
/* block padding */

@ -11,6 +11,8 @@
#include <gint/exc.h>
#include <gint/thread.h>
#include <gint/display.h>
#include "kernel.h"
/* Symbols provided by the linker script. For sections:
@ -148,6 +150,7 @@ int start(int isappli, int optnum)
regcpy(&lyram, &syram, &ryram);
}
#if 0
#ifdef FX9860G
/* Copy permanently-mapped code to start of user RAM (on fx-CG 50 it
is loaded along ILRAM contents) */
@ -161,6 +164,7 @@ int start(int isappli, int optnum)
fixups[i] += (uint32_t)rgmapped;
}
#endif
#endif
/* Install gint, switch VBR and initialize drivers */
kinit();
@ -178,18 +182,35 @@ int start(int isappli, int optnum)
/* "Pre-main" loop.
We're trying to run the main function using a thread; if the thread
creation fails, we will use the "classic" way to do the job. */
int rc = 1;
creation fails or if we return from the thread creation using the
longjmp() feature (see <gint/thread.h> for more information), we will
use the "classic" way to do the job.
@note
When we return from the thread_create() function with the longjmp()
feature, we are always in a thread execution context (the scheduler
always performs the context switch) */
int rc = -1;
thread_t thread;
while(1)
{
if (thread_create(&thread, 2, &main, isappli, optnum) != 0) {
rc = main(isappli, optnum);
} else {
thread_join(thread, (void**)&rc);
}
if(!gint_restart) break;
thread_attr_t attr;
thread_attr_init(&attr);
thread_attr_setdetachstate(&attr, THREAD_ATTR_DETACHED);
thread_attr_enablelongjmp(&attr, THREAD_ATTR_LONGJMP_ENABLE);
int retval = thread_create(&thread, &attr, &main, isappli, optnum);
if (retval == 0)
thread_join(thread, (void**)&rc);
//if (retval != 0 && (uint32_t)retval != THREAD_CREATE_LONGJMP_RETURN)
// rc = main(isappli, optnum);
dclear(C_BLACK);
dtext(0, 0, C_WHITE, "longjmp, success !");
dupdate();
while (1);
/* main loop */
while (gint_restart == 0) {
gint_osmenu();
rc = main(isappli, optnum);
}
callarray(&bdtors, &edtors);

@ -22,7 +22,6 @@ _setjmp:
/* save current environment */
add #52, r4
mov.l r3, @-r4 ! previous SR register status
sts.l pr, @-r4
sts.l mach, @-r4
sts.l macl, @-r4
@ -35,6 +34,7 @@ _setjmp:
mov.l r10, @-r4
mov.l r9, @-r4
mov.l r8, @-r4
mov.l r3, @-r4 ! previous SR register status
/* restore sr then exit */
ldc r3, sr
@ -57,8 +57,8 @@ _longjmp:
or r2, r1
ldc r1, sr
/* check and save arg validity into unbankable register to avoid
error when the "old" SR register will be restored */
/* check arg validity and save it into unbankable register to avoid
error when the "old" SR register is restored */
tst r5, r5
mov r4, r8
bf/s env_switch

src/thread/attributes.c Normal file
@ -0,0 +1,98 @@
//---
// gint:thread:attributes - Thread attribute helper
//---
#include <gint/thread.h>
#include <gint/std/string.h>
//---
// User interface
//---
/* thread_attr_init(): Initialize thread attribute object */
int thread_attr_init(thread_attr_t *attr)
{
if (attr == NULL)
return (-1);
thread_atomic_start();
memset(attr, 0x00, sizeof(struct thread_attr_s));
attr->state.detach = THREAD_ATTR_JOINABLE;
attr->state.longjmp = THREAD_ATTR_LONGJMP_DISABLE;
attr->private.watermark = THREAD_ATTR_WATERMARK;
thread_atomic_stop();
return (0);
}
/* thread_attr_setdetachstate() and thread_attr_getdetachstate()
The "thread_attr_setdetachstate()" function sets the detach state attribute
of the thread attributes object referred to by attr to the value specified
in detachstate.
The "thread_attr_getdetachstate()" returns the detach state attribute of
the thread attributes object attr in the buffer pointed to by detachstate.
*/
int thread_attr_setdetachstate(thread_attr_t *attr, int detachstate)
{
if (attr == NULL || attr->private.watermark != THREAD_ATTR_WATERMARK)
return (-1);
switch (detachstate) {
case THREAD_ATTR_JOINABLE:
case THREAD_ATTR_DETACHED:
break;
default:
return (-1);
}
thread_atomic_start();
attr->state.detach = detachstate;
thread_atomic_stop();
return (0);
}
int thread_attr_getdetachstate(thread_attr_t *attr, int *detachstate)
{
if (attr == NULL || attr->private.watermark != THREAD_ATTR_WATERMARK)
return (-1);
thread_atomic_start();
*detachstate = attr->state.detach;
thread_atomic_stop();
return (0);
}
/* thread_attr_enablelongjmp() thread_attr_getlongjmpstatus()
Enable / disable the custom longjmp feature */
int thread_attr_enablelongjmp(thread_attr_t *attr, int status)
{
if (attr == NULL || attr->private.watermark != THREAD_ATTR_WATERMARK)
return (-1);
switch (status) {
case THREAD_ATTR_LONGJMP_ENABLE:
case THREAD_ATTR_LONGJMP_DISABLE:
break;
default:
return (-1);
}
thread_atomic_start();
attr->state.longjmp = status;
thread_atomic_stop();
return (0);
}
int thread_attr_getlongjmpstatus(thread_attr_t *attr, int *status)
{
if (attr == NULL || attr->private.watermark != THREAD_ATTR_WATERMARK)
return (-1);
thread_atomic_start();
*status = attr->state.longjmp;
thread_atomic_stop();
return (0);
}
/* thread_attr_destroy(): Destroy thread attribute object */
int thread_attr_destroy(thread_attr_t *attr)
{
if (attr == NULL || attr->private.watermark != THREAD_ATTR_WATERMARK)
return (-1);
thread_atomic_start();
memset(attr, 0x00, sizeof(struct thread_attr_s));
thread_atomic_stop();
return (0);
}

src/thread/idle.c Normal file
@ -0,0 +1,52 @@
//---
// gint:thread:idle - Idle thread interface
//---
#include <gint/thread.h>
#include <gint/std/string.h>
/* define private symbols */
static struct thread thread_idle;
static uint32_t thread_idle_stack[THREAD_IDLE_STACK_SIZE];
//---
// Internal
//---
/* thread_idle_code(): Code executed by the idle thread */
static void thread_idle_code(void)
{
while (1) {
__asm__ volatile ("sleep");
}
}
//---
// Module primitive
//---
/* thread_idle_init(): Initialize the idle thread */
void thread_idle_init(void)
{
memset(&thread_idle, 0x00, sizeof(struct thread));
thread_idle.context.reg[15] = (uintptr_t)&thread_idle_stack;
thread_idle.context.reg[15] += (THREAD_IDLE_STACK_SIZE << 2);
thread_idle.context.spc = (uintptr_t)&thread_idle_code;
thread_idle.context.pr = (uintptr_t)0xa0000000;
}
void thread_idle_uninit(void)
{
return;
}
//---
// User interface
//---
struct thread *thread_idle_get(void)
{
return (&thread_idle);
}

@ -3,15 +3,15 @@
** This file contains all "kernel" low-level part of the thread module:
** - thread_kernel_sched_procedure() Thread scheduler procedure
** - thread_kernel_terminate_trampoline() Thread termination trampoline
** - thread_kernel_idle_code() Idle thread's code
** - thread_kernel_yield() Relinquish the CPU
** - thread_kernel_exit() Terminate the current thread
*/
.section .gint.mapped, "ax"
.align 4
.global _thread_kernel_sched_procedure
.type _thread_kernel_sched_procedure, @function
.global _thread_kernel_sched_interrupt_procedure
.type _thread_kernel_sched_interrupt_procedure, @function
/* thread_kernel_sched_procedure(): Scheduler entry
@ -36,41 +36,40 @@
same for the R1 register, which contains the stack snapshot. (Yes, I know
that the SH4-based MPU provides the SGR register, which saves the stack
snapshot when interrupts / exceptions occur, but we need to be SH3
compatible) */
_thread_kernel_sched_procedure:
_thread_kernel_sched_interrupt_procedure:
/* First, check if the interruption is the scheduler's timer */
mov.l intevt_register, r2
mov.l thread_tmu_interrupt_id, r3
mov.l thread_sched_tmu_interrupt_id, r3
mov.l @r2, r2
mov.l @r3, r3
cmp/eq r2, r3
bt stop_timer
bt _thread_kernel_sched_entry
rts
nop
stop_timer:
_thread_kernel_sched_entry:
/* Stop the scheduler's timer to "avoid" counting the context
switching time. */
/* The scheduler can use an ETMU instead of a classical TMU. ETMUs have
slow write interactions, but have the same access size as the TMU;
this is why we use slow operations here without checking the
timer type. */
mov.l thread_tmu_tstr_mask, r2
mov.l thread_tmu_tstr_addr, r3
mov.b @r2, r2
mov.b @r3, r4
not r2, r2
and r2, r4
tstr_slow_clear:
mov.b r4, @r3
mov.b @r3, r5
cmp/eq r5, r4
bf tstr_slow_clear
mov.l thread_sched_tmu_tstr_addr, r2
mov.l thread_sched_tmu_tstr_mask, r3
mov.l @r2, r2
mov.l @r3, r3
mov.b @r2, r6
not r3, r3
and r3, r6
timer_tstr_slow_stop:
mov.b r6, @r2
mov.b @r2, r7
cmp/eq r6, r7
bf timer_tstr_slow_stop
save_context:
/* check if a current thread is running */
mov.l thread_current_context, r2
mov.l thread_sched_current, r2
mov.l @r2, r2
tst r2, r2
bt/s schedule_thread
@ -103,10 +102,14 @@ save_context:
schedule_thread:
/* Call the high-level abstraction
@note
We need to save the R1/R0 registers (which contain the user stack / PR
register snapshots respectively) because if the scheduler fails, we
SHOULD be able to restore the context before returning from here.
We also need to switch the stack to avoid undefined behaviour if the
current thread is destroyed during the schedule (we currently always
use the thread's stack here) */
mov.l thread_kernel_stack, r15
mov.l r1, @-r15
mov.l r0, @-r15
mov.l thread_schedule, r0
@ -124,6 +127,10 @@ schedule_thread:
context_restore:
/* update_current_thread */
mov.l thread_sched_current, r1
mov.l r0, @r1
/* restore the new context */
ldc.l @r0+, R0_BANK
ldc.l @r0+, R1_BANK
@ -150,61 +157,96 @@ context_restore:
scheduler_restart_timer:
/* Check if the scheduler uses TMU or ETMU */
mov.l thread_scheduler_gint_timer_id, r0
mov.l thread_sched_gint_timer_id, r0
mov.l @r0, r0
mov #3, r1
cmp/ge r1, r0
/* Get scheduler timer pre-calculated information */
mov.l thread_tmu_tcr_mask, r0
mov.l thread_tmu_tcr_addr, r1
mov.l @r1, r1
mov.l thread_sched_tmu_tcr_addr, r0
mov.l thread_sched_tmu_tcr_mask, r1
mov.l @r0, r0
not r0, r0
mov.l thread_tmu_tstr_mask, r2
mov.l thread_tmu_tstr_addr, r3
bf/s tmu_restart_timer
mov.l @r1, r1
not r1, r1
mov.l thread_sched_tmu_tcor_addr, r2
mov.l thread_sched_tmu_tcnt_addr, r3
mov.l @r2, r2
mov.l @r3, r3
mov.l thread_sched_tmu_tstr_addr, r4
mov.l thread_sched_tmu_tstr_mask, r5
mov.l @r4, r4
bf/s tmu_restart_timer
mov.l @r5, r5
etmu_restart_timer:
/* clear interrupt flag (ETMU.TCR.UNF) */
mov.b @r1, r4
and r0, r4
tcr_slow_write:
mov.b r4, @r1
mov.b @r1, r0
cmp/eq r4, r0
bf tcr_slow_write
/* clear interrupt flag (ETMU.TCR.UNF = 0) */
mov.b @r0, r6
and r1, r6
etmu_tcr_slow_unf_clear:
mov.b r6, @r0
mov.b @r0, r7
cmp/eq r6, r7
bf etmu_tcr_slow_unf_clear
/* Restart timer (ETMU.TSTR) */
mov.b @r3, r4
or r2, r4
tstr_slow_write:
mov.b r4, @r3
mov.b @r3, r0
cmp/eq r4, r0
bf tstr_slow_write
/* reload the timer counter (ETMU.TCNT = ETMU.TCOR) */
mov.l @r2, r6
etmu_slow_reload:
mov.l r6, @r3
mov.l @r3, r7
cmp/eq r6, r7
bf etmu_slow_reload
/* Check if we really need to restart the timer or not */
mov.l thread_sched_counter, r0
mov.l @r0, r0
mov #2, r1
cmp/ge r1, r0
bf process_switch
/* Restart timer (ETMU.TSTR = 1) */
mov.b @r4, r6
or r5, r6
etmu_tstr_slow_restart:
mov.b r6, @r4
mov.b @r4, r7
cmp/eq r6, r7
bf etmu_tstr_slow_restart
bra process_switch
nop
tmu_restart_timer:
/* clear interrupt flag (TMU.TCR) */
mov.w @r1, r4 ! TCR addr
and r0, r4 ! TCR.UNF = 0
mov.w r4, @r1
/* clear interrupt flag (TMU.TCR.UNF) */
mov.w @r0, r6
and r1, r6
mov.w r6, @r0
/* reload the timer counter */
mov.l @r2, r6
mov.l r6, @r3
/* Check if we really need to restart the timer or not */
mov.l thread_sched_counter, r0
mov.l @r0, r0
mov #2, r1
cmp/ge r1, r0
bf process_switch
/* Restart timer (TMU.TSTR) */
mov.b @r3, r4
or r2, r4
mov.b r4, @r3
mov.b @r4, r6
or r5, r6
mov.b r6, @r4
process_switch:
rte
nop
.align 4
/* kernel information */
thread_kernel_stack: .long _thread_kernel_stack_start
/* scheduler global information */
thread_current_context: .long _thread_scheduler_current
thread_sched_current: .long _thread_sched_current
thread_sched_counter: .long _thread_sched_counter
/* High-level functions */
thread_schedule: .long _thread_schedule
@ -213,12 +255,14 @@ thread_schedule_debug: .long _thread_schedule_debug
#endif
/* Timer hardware pre-calculated information */
thread_scheduler_gint_timer_id: .long _thread_scheduler_gint_timer_id
thread_tmu_interrupt_id: .long _thread_tmu_interrupt_id
thread_tmu_tcr_addr: .long _thread_tmu_tcr_addr
thread_tmu_tcr_mask: .long _thread_tmu_tcr_mask
thread_tmu_tstr_mask: .long _thread_tmu_tstr_mask
thread_tmu_tstr_addr: .long _thread_tmu_tstr_addr
thread_sched_gint_timer_id: .long _thread_sched_gint_timer_id
thread_sched_tmu_interrupt_id: .long _thread_sched_tmu_interrupt_id
thread_sched_tmu_tcr_addr: .long _thread_sched_tmu_tcr_addr
thread_sched_tmu_tcr_mask: .long _thread_sched_tmu_tcr_mask
thread_sched_tmu_tstr_mask: .long _thread_sched_tmu_tstr_mask
thread_sched_tmu_tstr_addr: .long _thread_sched_tmu_tstr_addr
thread_sched_tmu_tcor_addr: .long _thread_sched_tmu_tcor_addr
thread_sched_tmu_tcnt_addr: .long _thread_sched_tmu_tcnt_addr
/* Other information */
intevt_register: .long 0xff000028
@ -234,64 +278,57 @@ intevt_register: .long 0xff000028
This function will move the calling thread to the end of the queue for its
static priority, and a new thread gets to run. */
_thread_kernel_yield:
/* TODO: involve the scheduler ! */
sleep
rts
/* start atomic operation + bank switch*/
stc sr, r0
mov r0, r1
mov #0x30, r2
shll8 r2
shll16 r2
or r2, r1
ldc r1, sr
/* prepare bank switch */
stc R0_BANK, r0
sts pr, r1
ldc r0, ssr
ldc r1, spc
/* simulate the interrupt by switching the register bank */
mov #0x20, r1
shll8 r1
shll16 r1
or r1, r0
ldc r0, sr
/* prepare to jump into thread_kernel_sched_entry() */
xor r0, r0
mov r15, r1
mov.l 1f, r2
jsr @r2
nop
.global _thread_kernel_get_current
.type _thread_kernel_get_current, @function
/* thread_kernel_get_current(): Get current thread structure address
This function will get the current thread address. You MUST not call it
unless you know exactly what you are doing with this type of information.
But, if you want to manipulate it, be sure that you are in "atomic" mode. */
_thread_kernel_get_current:
mov.l thread_scheduler_current, r0
rts
mov.l @r0, r0
.align 4
thread_scheduler_current:
.long _thread_scheduler_current
pouet:
sleep
bra pouet
nop
.global _thread_kernel_terminate_trampoline
.type _thread_kernel_terminate_trampoline, @function
/* thread_kernel_terminate_trampoline(): Thread termination trampoline
.global _thread_kernel_exit
.type _thread_kernel_exit, @function
This function SHOULD NEVER be called manually! It will be invoked when a
thread returns from its main procedure. This part exists only to save the
returned value, then invoke the high-level function "thread_exit()"
(see <gint/thread/thread.c> for more information). */
/*
r0 - SR backup
*/
_thread_kernel_terminate_trampoline:
mov.l thread_exit, r1
mov.l 2f, r1
jmp @r1
mov #0, r4
mov r0, r4
.align 4
thread_exit: .long _thread_exit
.global _thread_kernel_idle_code
.type _thread_kernel_idle_code, @function
/* thread_kernel_idle_code(): Idle code for the idle thread
This procedure does nothing and waits for any hardware signal.
This thread's code is invoked when no thread can be executed; its role is
to keep the scheduler up for an eventual delayed signal which would wake up
an interrupted thread. */
_thread_kernel_idle_code:
bra _thread_kernel_idle_code
nop
rts
nop
1: .long _thread_kernel_sched_entry
2: .long _thread_exit

@ -32,7 +32,7 @@ static struct thread *thread_mutex_validity_check(thread_mutex_t *mutex)
if (mutex == NULL || mutex->watermark != THREAD_MUTEX_WATERMARK)
return (NULL);
thread = thread_kernel_get_current();
thread = thread_sched_get_current();
if (thread == NULL)
return (NULL);
return (thread);
@ -86,7 +86,7 @@ int thread_mutex_lock(thread_mutex_t *mutex)
/* Check if the mutex is recursive */
if ((mutex->type & thread_mutex_type_recursive) != 0) {
if (mutex->owner == thread->private.id) {
if (mutex->owner == thread->scheduler.id) {
mutex->lock = mutex->lock + 1;
thread_atomic_stop();
return (thread_mutex_retval_success);
@ -97,15 +97,13 @@ int thread_mutex_lock(thread_mutex_t *mutex)
while (1) {
/* Check if the mutex is unlocked */
if (mutex->lock == 0) {
mutex->owner = thread->private.id;
mutex->owner = thread->scheduler.id;
mutex->lock = 1;
break;
};
/* Wait next schedule */
thread_atomic_stop();
thread_yield();
thread_atomic_start();
thread_kernel_yield();
}
/* Lock and return */
@ -128,7 +126,7 @@ int thread_mutex_timedlock(thread_mutex_t *mutex, uint64_t delay_us)
/* Check if the mutex is recursive */
if (mutex->type & thread_mutex_type_recursive) {
if (mutex->owner == thread->private.id) {
if (mutex->owner == thread->scheduler.id) {
mutex->lock = mutex->lock + 1;
thread_atomic_stop();
return (thread_mutex_retval_success);
@ -156,15 +154,13 @@ int thread_mutex_timedlock(thread_mutex_t *mutex, uint64_t delay_us)
/* Check if the mutex is unlocked */
if (mutex->lock == 0) {
mutex->lock = 1;
mutex->owner = thread->private.id;
mutex->owner = thread->scheduler.id;
retval = thread_mutex_retval_success;
break;
};
/* Wait next schedule */
thread_atomic_stop();
thread_yield();
thread_atomic_start();
thread_kernel_yield();
}
/* destroy the timer and return */
@ -190,7 +186,7 @@ int thread_mutex_trylock(thread_mutex_t *mutex)
/* Check recursive lock */
if (mutex->type & thread_mutex_type_recursive) {
if (mutex->owner == thread->private.id) {
if (mutex->owner == thread->scheduler.id) {
mutex->lock = mutex->lock + 1;
thread_atomic_stop();
return (thread_mutex_retval_success);
@ -199,7 +195,7 @@ int thread_mutex_trylock(thread_mutex_t *mutex)
/* Check if the mutex is unlocked */
if (mutex->lock == 0) {
mutex->owner = thread->private.id;
mutex->owner = thread->scheduler.id;
mutex->lock = 1;
thread_atomic_stop();
return (thread_mutex_retval_success);
@ -218,7 +214,7 @@ int thread_mutex_unlock(thread_mutex_t *mutex)
thread_atomic_start();
thread = thread_mutex_validity_check(mutex);
if (thread == NULL
|| mutex->owner != thread->private.id
|| mutex->owner != thread->scheduler.id
|| mutex->lock == 0) {
thread_atomic_stop();
return (thread_mutex_retval_error);
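/* A usage sketch for the ownership change above (illustrative; the owner is
   now tracked by thread->scheduler.id instead of thread->private.id):

	thread_mutex_lock(&mutex);	// spins via thread_kernel_yield() until free
	// ...critical section...
	thread_mutex_unlock(&mutex);	// only the owning thread may unlock
*/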

@ -1,72 +1,271 @@
//---
// gint:thread:scheduler - High-level scheduler part
// gint:thread:scheduler - Scheduler module
//---
#include <gint/thread.h>
#include <gint/timer.h>
#include <gint/std/stdlib.h>
#include <gint/display.h>
/* symbols defined in <gint/thread/thread.c> */
extern struct thread *thread_scheduler_queue;
extern struct thread *thread_scheduler_current;
extern struct thread thread_kernel_idle;
/* define symbols that will be used by the kernel to communicate with us */
struct thread *thread_sched_queue;
struct thread *thread_sched_current;
uint32_t thread_sched_counter;
thread_t thread_sched_uuid;
/* define symbols used to pre-calculate scheduler timer related information */
uint32_t thread_sched_tmu_interrupt_id;
uint32_t thread_sched_tmu_tcr_addr;
uint32_t thread_sched_tmu_tcr_mask;
uint32_t thread_sched_tmu_tstr_mask;
uint32_t thread_sched_tmu_tstr_addr;
uint32_t thread_sched_tmu_tcor_addr;
uint32_t thread_sched_tmu_tcnt_addr;
int thread_sched_gint_timer_id;
//---
// Driver primitives
//---
/* thread_sched_init(): Initialize the scheduler */
void thread_sched_init(void)
{
struct timer_debug_info info;
uint64_t delay;
/* initialize kernel information */
thread_sched_queue = NULL;
thread_sched_current = NULL;
thread_sched_counter = 0;
thread_sched_uuid = 0;
/* generate the delay in us */
delay = 1000000 / THREAD_SCHEDULER_FREQUENCY;
if(delay == 0)
delay = 1;
/* try to lock one timer */
thread_sched_gint_timer_id = timer_setup(TIMER_ANY, delay, NULL);
if (thread_sched_gint_timer_id < 0)
return;
/* pre-calculate timer information, used by the
"thread_kernel_sched_enty()" kernel function (see
<gint/thread/kernel.S> for more information ) */
timer_debug_get_hw_info(thread_sched_gint_timer_id, &info);
thread_sched_tmu_tcr_addr = (uintptr_t)info.hardware.address.tcr;
thread_sched_tmu_tcr_mask = (uintptr_t)info.interrupt.mask.tcr.unf;
thread_sched_tmu_tstr_addr = (uintptr_t)info.hardware.address.tstr;
thread_sched_tmu_tstr_mask = (uintptr_t)info.interrupt.mask.tstr.str;
thread_sched_tmu_tcor_addr = (uintptr_t)info.hardware.address.tcor;
thread_sched_tmu_tcnt_addr = (uintptr_t)info.hardware.address.tcnt;
thread_sched_tmu_interrupt_id = (uintptr_t)info.interrupt.id;
}
/* thread_sched_uninit(): Uninitialize the scheduler */
void thread_sched_uninit(void)
{
struct thread **thread;
struct thread *tmp;
thread_atomic_start();
thread_sched_stop();
thread = &thread_sched_queue;
while (*thread != NULL) {
tmp = *thread;
*thread = (*thread)->scheduler.next;
//thread_kill(tmp, 0);
thread_sched_remove(tmp);
}
}
//---
// User interface
//---
/* thread_sched_start(): Start the scheduler timer */
void thread_sched_start(void)
{
thread_atomic_start();
timer_start(thread_sched_gint_timer_id);
thread_atomic_stop();
}
/* thread_sched_stop(): Stop the scheduler timer */
void thread_sched_stop(void)
{
thread_atomic_start();
timer_pause(thread_sched_gint_timer_id);
thread_atomic_stop();
}
/* thread_sched_add(): Add thread to the scheduler queue */
int thread_sched_add(struct thread *thread)
{
thread_atomic_start();
/* link the thread */
thread->scheduler.id = thread_sched_uuid;
thread->scheduler.status = THREAD_STATUS_RUNNING;
thread->scheduler.next = thread_sched_queue;
thread_sched_queue = thread;
/* update internal information */
thread_sched_uuid = thread_sched_uuid + 1;
thread_sched_counter = thread_sched_counter + 1;
thread_atomic_stop();
return (0);
}
/* thread_sched_remove(): Remove a thread from the scheduler queue */
int thread_sched_remove(struct thread *thread)
{
struct thread **parent;
/* Try to find the thread's parent */
thread_atomic_start();
parent = &thread_sched_queue;
while (*parent != NULL) {
if (*parent == thread)
break;
parent = &(*parent)->scheduler.next;
}
if (parent == NULL) {
thread_atomic_stop();
return (-1);
}
/* unlink the thread */
*parent = thread->scheduler.next;
/* remove scheduler information */
thread->scheduler.id = -1;
thread->scheduler.next = NULL;
/* update internal information */
thread_sched_counter = thread_sched_counter - 1;
thread_atomic_stop();
return (0);
}
/* thread_sched_get_current(): Get the current thread */
struct thread *thread_sched_get_current(void)
{
return(thread_sched_current);
}
//---
// Kernel interface
//---
/* thread_sched_check(): Check thread validity
@return:
* 0 can be used
* -1 cannot be loaded
* -2 has been removed
*/
static int thread_sched_check(struct thread *thread)
{
if (thread->scheduler.status != THREAD_STATUS_RUNNING)
return (-1);
switch (thread_signals_pending_deliver(thread)) {
case thread_signals_deliver_retval_running: return (0);
case thread_signals_deliver_retval_stopped: return (-1);
case thread_signals_deliver_retval_dead:
default:
if (thread_terminate(thread) == 0)
return (-2);
}
return (-1);
}
/* thread_schedule(): Schedule thread queue and return the next thread
This function SHOULD NOT be invoked manually; it should be invoked only
by the scheduler "handler" (see <gint/thread/kernel.S>). If you know what
you are doing and want to call this function, you SHOULD do it in an
"atomic" environment. */
struct thread *thread_schedule(void)
{
struct thread *thread;
struct thread *next;
/* Check potential error. */
if (thread_scheduler_queue == NULL)
if (thread_sched_queue == NULL)
return (NULL);
/* if we have no currently running thread, return directly
TODO: stop the scheduler timer until multiple thread is detected. */
if (thread_scheduler_current == NULL) {
thread_scheduler_current = thread_scheduler_queue;
return (thread_scheduler_queue);
}
/* if we have no currently running thread, return directly */
if (thread_sched_current == NULL)
return (thread_sched_queue);
/* Check all scheduler thread in the queue by checking their status and
pending signals. */
thread = thread_scheduler_current;
do {
/* Get the potential next thread */
thread = thread->private.next;
if (thread == NULL)
thread = thread_scheduler_queue;
/* pre-calculate the next thread */
next = thread_sched_current;
if (thread_sched_current == NULL)
next = thread_sched_queue;
next = next->scheduler.next;
if (next == NULL)
next = thread_sched_queue;
/* Check if the potential next thread is valid */
/* A thread that has a pending signal involving a user signal
handler is considered a valid thread; this is why we
deliver signals now. */
if (thread->status == THREAD_STATUS_RUNNING
&& thread_signals_pending_deliver(thread) == 0) {
break;
}
} while (thread != thread_scheduler_current);
do {
/* Get the potential next thread because the current can be
destroyed during operations */
thread = next;
next = next->scheduler.next;
if (next == NULL)
next = thread_sched_queue;
/* Check if no thread has been found, check its signals and see if the
context can be loaded again. */
if (thread == thread_scheduler_current) {
if (thread->status == THREAD_STATUS_RUNNING
&& thread_signals_pending_deliver(thread) == 0) {
/* Check the thread validity */
if (thread_sched_check(thread) == 0)
return (thread);
}
} while (thread != thread_sched_current);
/* If no thread has been found, load idle kernel thread which
will only wait the next scheduler timer intervention.
(see <gint/thread/thread.c> for more information). */
return (&thread_kernel_idle);
}
/* return the next thread */
thread_scheduler_current = thread;
return (thread);
/* If no thread has been found, load idle kernel thread which
will only wait the next scheduler timer intervention.
(see <gint/thread/thread.c> for more information). */
return (thread_idle_get());
}
/* thread_sched_find(): Find the thread using its identifier */
struct thread *thread_sched_find(thread_t id)
{
struct thread *thread;
thread_atomic_start();
thread = thread_sched_queue;
while (thread != NULL) {
if (thread->scheduler.id == id) {
thread_atomic_stop();
return (thread);
}
thread = thread->scheduler.next;
}
thread_atomic_stop();
return (NULL);
}
/* invalidate the current thread to avoid context saving */
/* TODO: find a better way to do the job */
/* TODO: doc */
void thread_sched_invalidate(void)
{
thread_sched_current = NULL;
}
//---
// Debugging interface
//---
#ifdef THREAD_SCHEDULER_DEBUG
#include <gint/display.h>
int context_switch_counter = 0;
/* thread_schedule_debug(): Debug the scheduler, invoked at each context switch */
struct thread *thread_schedule_debug(struct thread *thread)
{
@ -107,20 +306,22 @@ struct thread *thread_schedule_debug(struct thread *thread)
while (1);
*/
static int context_switch_counter = 0;
context_switch_counter = context_switch_counter + 1;
if (context_switch_counter == 2) {
dclear(C_WHITE);
dprint(1, 51, C_BLACK, "next process -> %p", thread);
dprint(1, 61, C_BLACK, "|--spc: %p", thread->context.cpu.spc);
dprint(1, 71, C_BLACK, "|--ssr: %p", thread->context.cpu.ssr);
dprint(1, 81, C_BLACK, "|--r15: %p", thread->context.cpu.reg[15]);
r61524_display(gint_vram, 0, 224, R61524_CPU);
while (1);
}
// if (context_switch_counter < 3)
return (thread);
uint8_t tstr_before = ((uint8_t *)thread_sched_tmu_tstr_addr)[0];
uint8_t tstr_after = ((uint8_t *)thread_sched_tmu_tstr_addr)[0] & (~thread_sched_tmu_tstr_mask);
return (thread);
dclear(C_WHITE);
dprint(1, 11, C_BLACK, "next process -> %p", thread);
dprint(1, 21, C_BLACK, "TMU -> ID: %#x", (uint8_t *)thread_sched_gint_timer_id);
dprint(1, 31, C_BLACK, "TMU -> TSTR: %#x", tstr_before);
dprint(1, 41, C_BLACK, "TMU -> TCOR: %#x", ((uint32_t*)thread_sched_tmu_tcor_addr)[0]);
dprint(1, 51, C_BLACK, "TMU -> TCNT: %#x", ((uint32_t*)thread_sched_tmu_tcnt_addr)[0]);
dprint(1, 71, C_BLACK, "TMU -> TSTR %#x", thread_sched_tmu_tstr_mask);
dprint(1, 81, C_BLACK, "TMU -> TSTR %#x", tstr_after);
dupdate();
while (1);
}
#endif

@ -2,11 +2,174 @@
// gint:thread:signal - Thread Signals management
//---
#include <gint/thread.h>
#include <gint/std/string.h>
/* thread_signals_deliver_pending(): Deliver all pending signals */
int thread_signals_pending_deliver(struct thread *thread)
//---
// internal functions
//---
/* thread_signals_deliver(): Deliver a signal */
static int thread_signals_deliver(struct thread *thread, int sig)
{
//TODO
(void)thread;
// check if the signal is ignored
if (thread->signals.handler[sig] == SIG_IGN)
return (thread_signals_deliver_retval_running);
// Check custom signal management
if (thread->signals.handler[sig] != SIG_DFL) {
/* save current context into user's stack and update stack */
thread->context.reg[15] -= sizeof(struct cpu_ctx) + 3;
thread->context.reg[15] = thread->context.reg[15] >> 2 << 2;
memcpy((void*)(uintptr_t)thread->context.reg[15],
&thread->context, sizeof(struct cpu_ctx));
/* clean context and set the handler */
for (int i = 0 ; i < 14 ; i = i + 1)
thread->context.reg[i] = 0x00000000;
thread->context.gbr = 0x00000000;
thread->context.macl = 0x00000000;
thread->context.mach = 0x00000000;
thread->context.ssr = 0x40000000;
thread->context.spc = (uintptr_t)thread->signals.handler[sig];
thread->context.pr = (uintptr_t)&thread_signals_sigreturn;
/* send the signal number */
thread->context.reg[4] = sig;
return (thread_signals_deliver_retval_running);
}
/* default actions */
switch (sig) {
/* Stop the current thread (TODO: stop all child ?) */
case SIGSTOP:
thread->scheduler.status = THREAD_STATUS_PAUSED;
return (thread_signals_deliver_retval_stopped);
/* Wake up the parent */
case SIGCONT:
if (thread->private.parent != NULL) {
thread_signals_raise(
thread->private.parent, SIGCHLD);
}
return (0);
/* Nothing, just wake up the process */
case SIGCHLD:
return (0);
/* Process termination (default) */
case SIGTERM: // termination (TODO: cleanup part !!!)
case SIGINT: // interruption
case SIGHUP: // hang up
case SIGKILL: // kill
case SIGBUS: // bus error
case SIGFPE: // fatal arithmetic error
case SIGSEGV: // segmentation violation
default:
return (thread_signals_deliver_retval_dead);
}
}
//---
// Kernel interface
//---
/* thread_signals_raise(): Tricky part to handle signals
@return:
* 1 Cannot be scheduled
* 0 Can be scheduled
* -1 The thread SHOULD be removed !
*/
int thread_signals_raise(struct thread *thread, int sig)
{
if (sig >= NSIG)
return (-1);
thread_atomic_start();
if (thread == NULL
|| thread->scheduler.status == THREAD_STATUS_ZOMBIE) {
thread_atomic_stop();
return (-1);
}
thread->scheduler.status = THREAD_STATUS_RUNNING;
thread->signals.pending |= 1 << sig;
thread_atomic_stop();
return (0);
}
/* thread_signals_deliver_pending(): Deliver all pending signals
@return
* 0 - can be scheduled
* negative value - should not be scheduled
*/
#include <gint/display.h>
int thread_signals_pending_deliver(struct thread *thread)
{
sigset_t sig;
sigset_t block;
int retval;
retval = 0;
thread_atomic_start();
block = thread->signals.blocking;
block = block & ~(1 << SIGKILL);
block = block & ~(1 << SIGSTOP);
block = block & ~(1 << SIGTERM);
sig = thread->signals.pending & (~block);
for (int i = 0; i < NSIG && retval == 0; ++i) {
if ((sig & (1 << i)) == 0)
continue;
retval = thread_signals_deliver(thread, i);
thread->signals.pending &= ~(1 << i);
}
thread_atomic_stop();
return (retval);
}
/* thread_signals_sigreturn(): TODO */
void thread_signals_sigreturn(void)
{
struct thread *thread;
void *stack;
/* thread */
thread_atomic_start();
thread = thread_sched_get_current();
if (thread == NULL) {
thread_atomic_stop();
while (1) { thread_kernel_yield(); };
}
/* dump old context */
stack = (void*)(uintptr_t)thread->context.reg[15];
memcpy(&thread->context, stack, sizeof(struct cpu_ctx));
/* restore stack */
thread->context.reg[15] += (sizeof(struct cpu_ctx) + 3) >> 2 << 2;
thread_sched_invalidate();
/* force schedule */
thread_atomic_stop();
while (1) {
thread_kernel_yield();
}
}
/* thread_signals_replace(): TODO */
void (*thread_signals_replace(struct thread *thread,
int signum, void (*handler)(int)))(int)
{
void (*old)(int);
if (signum >= NSIG)
return (SIG_ERR);
thread_atomic_start();
if (thread == NULL
|| thread->scheduler.status == THREAD_STATUS_ZOMBIE) {
thread_atomic_stop();
return (SIG_ERR);
}
old = thread->signals.handler[signum];
thread->signals.handler[signum] = handler;
thread_atomic_stop();
return (old);
}

@ -8,48 +8,38 @@
#include <gint/std/stdlib.h>
#include <gint/std/string.h>
/* Scheduler hardware pre-calculated information */
uint32_t thread_tmu_interrupt_id;
uint32_t thread_tmu_tcr_addr;
uint32_t thread_tmu_tcr_mask;
uint32_t thread_tmu_tstr_addr;
uint32_t thread_tmu_tstr_mask;
/* scheduler software information */
struct thread *thread_scheduler_queue;
struct thread *thread_scheduler_current;
int thread_scheduler_gint_timer_id;
/* Kernel information */
uint32_t thread_kernel_idle_stack[THREAD_KERNEL_IDLE_STACK_SIZE];
struct thread thread_kernel_idle;
/* other information */
thread_t thread_id_counter;
jmp_buf kerneljmp;
#include <gint/display.h>
#undef thread_create
//---
// Internal thread actions
//---
/* thread_find_by_id(): Try to find a specific thread using its ID
This function SHOULD be called in a "thread atomic" context! */
static int thread_find_by_id(thread_t tid, struct thread ***target)
#if 0
/* thread_kill_child()
If THREAD_ATTR_LONGJMP_ENABLE is set, this function is invoked when the
thread is created. If the "longjmp()" feature is set, then the "setjmp()"
function will return with a retval that differs from 0. In this case, it
means that the created thread just died, so we should kill all child
threads generated by it. */
static void thread_kill_child(struct thread **thread)
{
struct thread **thread;
/* walking */
if (*thread == NULL)
return;
thread_kill_child(&(*thread)->private.child);
thread_kill_child(&(*thread)->private.sibling);
thread = &thread_scheduler_queue;
while (*thread != NULL) {
if ((*thread)->private.id == tid) {
*target = thread;
return (0);
}
thread = &(*thread)->private.next;
}
return (-1);
/* destroy the thread */
/*thread_atomic_start();
thread_kill(*thread, 0);
thread_sched_remove(*thread);
*thread = NULL;*/
thread_atomic_stop();
}
#endif
@ -58,21 +48,18 @@ static int thread_find_by_id(thread_t tid, struct thread ***target)
//---
/* thread_create(): Create a new thread */
int thread_create(thread_t *tid, int nb_arg, void *function, ...)
int thread_create(thread_t *tid, thread_attr_t *attr, void *function, ...)
{
struct thread *thread;
uintptr_t *tmp;
struct thread *parent;
uint32_t argbuf[16];
int arg_counter;
uintptr_t arg;
va_list ap;
/* Check first created thread */
/* Start the scheduler to avoid small freeze with the first thread */
thread_atomic_start();
if (thread_scheduler_queue == NULL) {
if (setjmp(kerneljmp) != 0) {
thread_atomic_stop();
return (0xdeadbeef);
}
timer_start(thread_scheduler_gint_timer_id);
}
thread_sched_start();
/* create the new thread */
thread = (struct thread *)calloc(1, sizeof(struct thread));
@ -80,105 +67,201 @@ int thread_create(thread_t *tid, int nb_arg, void *function, ...)
thread_atomic_stop();
return (-1);
}
thread->private.stack = (uintptr_t)malloc(THREAD_STACK_SIZE);
thread->private.stack = (uintptr_t)malloc(THREAD_STACK_SIZE + 4);
if (thread->private.stack == 0x00000000) {
free(thread);
thread_atomic_stop();
return (-1);
}
thread->context.cpu.reg[15] = thread->private.stack + THREAD_STACK_SIZE;
thread->status = THREAD_STATUS_RUNNING;
thread->context.cpu.pr = (uintptr_t)thread_kernel_terminate_trampoline;
thread->context.cpu.spc = (uintptr_t)function;
thread->context.cpu.ssr = 0x40000000;
thread->context.reg[15] = ((thread->private.stack + 3) >> 2 << 2);
thread->context.reg[15] += ((THREAD_STACK_SIZE + 3) >> 2 << 2);
thread->context.pr = (uintptr_t)thread_kernel_terminate_trampoline;
thread->context.spc = (uintptr_t)function;
thread->context.ssr = 0x40000000;
/* dump arguments */
va_start(ap, function);
for (int i = 0; i < nb_arg; ++i) {
if (i <= 7) {
thread->context.cpu.reg[4 + i] = va_arg(ap, uintptr_t);
continue;
for (arg_counter = 0; arg_counter <= 16; ++arg_counter) {
arg = va_arg(ap, uint32_t);
if (arg == THREAD_CREATE_ARG_END_WATERMARK)
break;
if (arg_counter < 4) {
thread->context.reg[arg_counter + 4] = arg;
} else {
argbuf[arg_counter - 4] = arg;
}
tmp = (uintptr_t*)((uintptr_t)thread->context.cpu.reg[15]);
tmp[0 - i] = va_arg(ap, uintptr_t);
}
va_end(ap);
if (arg_counter >= 4) {
arg_counter -= 4;
thread->context.reg[15] -= arg_counter << 2;
memcpy((void*)(uintptr_t)thread->context.reg[15],
argbuf, arg_counter << 2);
}
/* link the thread to the queue */
thread->private.next = thread_scheduler_queue;
thread_scheduler_queue = thread;
/* dump attribute if needed */
if (attr != NULL) {
memcpy(&thread->attr, attr, sizeof(struct thread_attr_s));
/* generate the thread ID */
*tid = thread_id_counter;
thread->private.id = thread_id_counter;
thread_id_counter = thread_id_counter + 1;
/* check longjmp() feature */
if (attr->state.longjmp == THREAD_ATTR_LONGJMP_ENABLE) {
if (setjmp(thread->private.jmpbuf) != 0) {
thread_atomic_stop();
return (THREAD_CREATE_LONGJMP_RETURN);
}
}
/* check joinable state */
if (attr->state.detach == THREAD_ATTR_JOINABLE) {
parent = thread_sched_get_current();
if (parent != NULL) {
thread->private.parent = parent;
thread->private.sibling = parent->private.child;
parent->private.child = thread;
}
}
}
/* link the thread to the scheduler */
thread_sched_add(thread);
*tid = thread->scheduler.id;
/* initialize signals */
for (int i = 0; i < NSIG; ++i)
thread->signals.handler[i] = SIG_DFL;
thread->signals.pending = 0x00000000;
thread->signals.blocking = 0x00000000;
thread_atomic_stop();
return (0);
}
/* thread_tryjoin()
Same behaviour as thread_join(), except that if the thread is not in the
ZOMBIE state it returns directly with an error. */
int thread_tryjoin(thread_t id, void **retval)
{
struct thread *thread;
/* Try to find the thread */
thread_atomic_start();
thread = thread_sched_find(id);
if (thread == NULL) {
thread_atomic_stop();
return (-1);
}
/* check its status */
if (thread->scheduler.status != THREAD_STATUS_ZOMBIE) {
thread_atomic_stop();
return (-2);
}
/* destroy the thread */
if (retval != NULL)
*retval = (void*)((uintptr_t)thread->private.ret);
thread_sched_remove(thread);
free(thread);
thread_atomic_stop();
return (0);
}
/* thread_join(): Wait for a thread's termination */
int thread_join(thread_t tid, void **retval)
int thread_join(thread_t id, void **retval)
{
struct thread **thread;
void *tmp;
/* wait until the target thread is not in the zombies state */
while (1) {
/* Try to find the thread and check its status */
thread_atomic_start();
if (thread_find_by_id(tid, &thread) != 0) {
thread_atomic_stop();
return (-1);
}
if ((*thread)->status == THREAD_STATUS_ZOMBIE)
break;
/* clean wait (TODO: sched_yield()) */
thread_atomic_stop();
__asm__ volatile ("sleep");
while (thread_tryjoin(id, retval) != 0) {
thread_kernel_yield();
}
/* destroy the thread */
tmp = *thread;
*retval = (void*)((uintptr_t)(*thread)->private.ret);
*thread = (*thread)->private.next;
free(tmp);
return (0);
}
/* thread_exit(): Terminate the calling thread */
//TODO: cleanup_push() all unpopped clean-up handlers !
//TODO: block any signals !
void thread_exit(void *retval)
{
/* free'd thread internal memory */
struct thread *thread;
/* get the current thread */
thread_atomic_start();
thread_scheduler_current->status = THREAD_STATUS_ZOMBIE;
thread_scheduler_current->private.ret = retval;
free((void *)thread_scheduler_current->private.stack);
thread_atomic_stop();
/* Check if the thread is the last */
if (thread_scheduler_queue == thread_scheduler_current
&& thread_scheduler_current->private.next == NULL) {
timer_pause(thread_scheduler_gint_timer_id);
longjmp(kerneljmp, 1);
thread = thread_sched_get_current();
if (thread == NULL) {
thread_atomic_stop();
while (1) { thread_kernel_yield(); };
}
/* wait until a scheduler interrupt */
/* Check thread longjmp() attribute feature */
if (thread->attr.private.watermark == THREAD_ATTR_WATERMARK
&& thread->attr.state.longjmp == THREAD_ATTR_LONGJMP_ENABLE) {
longjmp(thread->private.jmpbuf, 1);
}
/* block all signals */
thread->signals.blocking = -1;
thread->private.ret = (uintptr_t)retval;
thread_signals_raise(thread, SIGTERM);
thread_atomic_stop();
/* Wait and yield */
while (1) {
__asm__ volatile ("sleep");
thread_kernel_yield();
}
}
/* thread_yield(): Cause the calling thread to relinquish the CPU */
extern void thread_yield(void)
/* thread_kill(): Destroy the thread */
int thread_kill(thread_t id, int sig)
{
/* involve kernel directly */
thread_kernel_yield();
struct thread *thread;
int ret;
thread_atomic_start();
thread = thread_sched_find(id);
if (thread == NULL) {
thread_atomic_stop();
return (-1);
}
ret = thread_signals_raise(thread, sig);
thread_atomic_stop();
return (ret);
}
/* thread_signal(): set the disposition of the signal signum to handler */
void (*thread_signal(int signum, void (*handler)(int)))(int)
{
struct thread *thread;
void (*ret)(int);
thread_atomic_start();
thread = thread_sched_get_current();
if (thread == NULL) {
thread_atomic_stop();
return (SIG_ERR);
}
ret = thread_signals_replace(thread, signum, handler);
thread_atomic_stop();
return (ret);
}
//---
// Kernel interface
//---
/* thread_terminate(): Terminate a thread */
int thread_terminate(struct thread *thread)
{
/* free the stack */
if (thread->private.stack != 0x00000000)
free((void*)thread->private.stack);
/* check if the thread is detached */
if (thread->attr.private.watermark == THREAD_ATTR_WATERMARK
&& thread->attr.state.detach == THREAD_ATTR_DETACHED) {
thread_sched_remove(thread);
free(thread);
return (0);
}
/* the thread is joinable; wait until someone reads its return value
(the thread will be destroyed in the "thread_tryjoin()" function) */
thread->scheduler.status = THREAD_STATUS_ZOMBIE;
return (1);
}
//---
// Driver part
@ -187,36 +270,11 @@ extern void thread_yield(void)
/* init(): setup the scheduler */
static void init(void)
{
struct timer_debug_info tinfo;
int delay;
/* initialize the scheduler */
thread_sched_init();
/* initialize scheduler */
thread_scheduler_queue = NULL;
thread_scheduler_current = NULL;
thread_id_counter = 0;
/* initialize idle thread */
memset(&thread_kernel_idle, 0x00, sizeof(struct thread));
thread_kernel_idle.context.cpu.reg[15] =
(uintptr_t)&thread_kernel_idle_stack;
thread_kernel_idle.context.cpu.reg[15] +=
THREAD_KERNEL_IDLE_STACK_SIZE;
thread_kernel_idle.context.cpu.spc =
(uintptr_t)&thread_kernel_idle_code;
thread_kernel_idle.context.cpu.pr =
(uintptr_t)&thread_kernel_terminate_trampoline;
/* setup scheduler's timer */
delay = 1000000 / THREAD_SCHEDULER_FREQUENCY;
if(!delay) delay = 1;
thread_scheduler_gint_timer_id = timer_setup(TIMER_ANY, delay, NULL);
if (thread_scheduler_gint_timer_id < 0) return;
timer_debug_get_hw_info(thread_scheduler_gint_timer_id, &tinfo);
thread_tmu_tcr_addr = (uintptr_t)tinfo.hardware.address.tcr;
thread_tmu_tcr_mask = (uintptr_t)tinfo.interrupt.mask.tcr.unf;
thread_tmu_tstr_addr = (uintptr_t)tinfo.hardware.address.tstr;
thread_tmu_tstr_mask = (uintptr_t)tinfo.interrupt.mask.tstr.str;
thread_tmu_interrupt_id = (uintptr_t)tinfo.interrupt.id;
/* initialize the idle thread */
thread_idle_init();
}
/* The thread scheduler is considered a driver */