/* vxKernel - src/driver/mpu/sh/sh7305/tmu/profiling.c
   SH7305 TMU-backed profiling driver */
#include <vhex/timer.h>
#include <vhex/driver/mpu/sh/sh7305/tmu.h>
#include <vhex/driver/mpu/sh/sh7305/cpg.h>
#include <vhex/driver/cpu.h>
/* Pointer to the claimed timer's down-counter register (TCNT), or NULL
   while no hardware timer has been claimed yet */
uint32_t volatile *tmu_prof_tcnt = NULL;
/* Number of live profiling contexts sharing the timer */
static int tmu_prof_counter = 0;
/* Index of the claimed hardware timer, -1 if none */
static int tmu_prof_timer = -1;
/* Provided by the TMU driver: non-zero if timer `id` is free for use */
extern int available(int id);
/* Shortcut to set registers that are slow to update: write the value,
   then spin until the read-back matches. Arguments are parenthesized
   and the loop body braced so the macro expands safely in any statement
   context (e.g. as the sole body of an if/else). */
#define set(lval, rval) do { (lval) = (rval); } while ((lval) != (rval))
//---
// Public profiling API
//---
/* sh7305_tmu_prof_init() : initialise a new profiling context
   On first use, claims a free hardware timer (shared by all profiling
   contexts) and starts it free-running; subsequent calls only bump the
   reference counter. Returns 0 on success, -1 if no timer is free. */
int sh7305_tmu_prof_init(timer_prof_t *prof)
{
    cpu_atomic_start();
    /* lazy one-time setup of the shared backing timer */
    if (tmu_prof_tcnt == NULL) {
        tmu_prof_timer = -1;
        /* only timers 0 and 1 are probed — timer 2 is presumably left
           for other users; TODO confirm */
        for(int t = 0; t < 2; t++)
        {
            if (!available(t))
                continue;
            tmu_prof_timer = t;
            tmu_prof_tcnt = &SH7305_TMU.TMU[t].TCNT;
            /* free-run from 0xffffffff, clocked at Pphi/4 */
            SH7305_TMU.TMU[t].TCOR = 0xffffffff;
            SH7305_TMU.TMU[t].TCNT = 0xffffffff;
            SH7305_TMU.TMU[t].TCR.TPSC = TIMER_Pphi_4;
            /* UNF is slow to latch; spin until the clear is visible */
            set(SH7305_TMU.TMU[t].TCR.UNF, 0);
            SH7305_TMU.TMU[t].TCR.UNIE = 0;   /* no underflow interrupt */
            SH7305_TMU.TMU[t].TCR.CKEG = 0;
            SH7305_TMU.TSTR |= 1 << t;        /* start the counter */
            break;
        }
        /* no free timer: fail without touching the reference counter */
        if (tmu_prof_timer < 0) {
            cpu_atomic_end();
            return (-1);
        }
    }
    /* reset the per-context accumulators */
    if (prof != NULL) {
        prof->rec = 0;
        prof->elapsed = 0;
    }
    tmu_prof_counter += 1;
    cpu_atomic_end();
    return (0);
}
/* sh7305_tmu_prof_enter(): Start counting time for a function
   Adds the current counter value to `elapsed`; the matching leave()
   subtracts it, so the pair accumulates the elapsed tick delta. */
void sh7305_tmu_prof_enter(timer_prof_t *prof)
{
    if (tmu_prof_tcnt == NULL)
        return;
    prof->elapsed += *tmu_prof_tcnt;
}
/* sh7305_tmu_prof_enter_rec(): Start counting time for a recursive function
   Only the outermost entry (depth 0 -> 1) samples the counter; nested
   re-entries just track the recursion depth. */
void sh7305_tmu_prof_enter_rec(timer_prof_t *prof)
{
    int depth = prof->rec;
    prof->rec = depth + 1;
    if (depth == 0 && tmu_prof_tcnt != NULL)
        prof->elapsed += *tmu_prof_tcnt;
}
/* sh7305_tmu_prof_leave_rec(): Stop counting time for a recursive function
   Only the outermost exit (depth 1 -> 0) samples the counter, mirroring
   enter_rec(). */
void sh7305_tmu_prof_leave_rec(timer_prof_t *prof)
{
    prof->rec -= 1;
    if (prof->rec == 0 && tmu_prof_tcnt != NULL)
        prof->elapsed -= *tmu_prof_tcnt;
}
/* sh7305_tmu_prof_leave(): Stop counting time for a function
   Subtracts the current counter value, completing the delta started by
   enter(). */
void sh7305_tmu_prof_leave(timer_prof_t *prof)
{
    if (tmu_prof_tcnt == NULL)
        return;
    prof->elapsed -= *tmu_prof_tcnt;
}
/* sh7305_tmu_prof_time(): Time spent in a given context, in microseconds
   Converts the accumulated tick count to microseconds using the current
   Pphi frequency (the timer runs at Pphi/4). */
uint32_t sh7305_tmu_prof_time(timer_prof_t *prof)
{
    struct cpg_clock_frequency freq_info;
    uint64_t ticks;

    cpg_clock_freq(&freq_info);
    /* 64-bit intermediate avoids overflow of ticks * 1e6 */
    ticks = (uint64_t)prof->elapsed * 4;
    return (ticks * 1000000) / freq_info.Pphi_f;
}
/* sh7305_tmu_prof_quit() : uninit the profiling object
   Drops one reference on the shared profiling timer; when the last user
   quits, the hardware timer is stopped and released. Always returns 0. */
int sh7305_tmu_prof_quit(timer_prof_t *prof)
{
    (void)prof;     /* per-context state needs no cleanup */
    cpu_atomic_start();
    if (--tmu_prof_counter <= 0) {
        /* last user: stop and release the hardware timer, if any */
        if(tmu_prof_timer >= 0)
            sh7305_tmu_stop(tmu_prof_timer);
        tmu_prof_timer = -1;
        tmu_prof_tcnt = NULL;
        /* clamp so unbalanced quit calls cannot drive the reference
           count negative and unbalance a later init/quit pair */
        tmu_prof_counter = 0;
    }
    cpu_atomic_end();
    /* bug fix: function is declared int but previously fell off the
       end without returning a value (UB when the caller reads it) */
    return (0);
}