py/scheduler: Fix race in checking scheduler pending state.

Because the atomic section starts after checking whether the scheduler
state is pending, it's possible for the state to change before the atomic
section actually begins.

This is especially likely on ports where MICROPY_BEGIN_ATOMIC_SECTION is
implemented with a mutex (i.e. it might block), but the race exists
regardless, e.g. if a context switch occurs between those two lines.
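
The fix applies the standard "re-check under the lock" pattern. Below is a
minimal sketch of that pattern, not MicroPython code: a pthread mutex stands
in for an atomic-section implementation that might block, and all names are
illustrative only.

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool pending = false;

    void handle_pending(void) {
        if (pending) {                      // unlocked fast-path check
            pthread_mutex_lock(&lock);      // may block; state can change while waiting
            if (pending) {                  // re-check now that the lock is held
                pending = false;
                // ... process the pending work ...
            }
            pthread_mutex_unlock(&lock);
        }
    }

Without the second check, a caller that loses the race acts on state that
another thread has already consumed; for the scheduler that means treating
sched_state as pending when it no longer is.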
commit 243805d776 (parent c2cfbcc8d4)
Author: Jim Mussared, 2020-04-03 14:15:18 +11:00
Committed by: Damien George
2 changed files with 29 additions and 19 deletions

py/scheduler.c

@@ -60,22 +60,27 @@ static inline bool mp_sched_empty(void) {
 void mp_handle_pending(bool raise_exc) {
     if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
         mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
-        mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
-        if (obj != MP_OBJ_NULL) {
-            MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
-            if (!mp_sched_num_pending()) {
-                MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
-            }
-            if (raise_exc) {
-                MICROPY_END_ATOMIC_SECTION(atomic_state);
-                nlr_raise(obj);
+        // Re-check state is still pending now that we're in the atomic section.
+        if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
+            mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
+            if (obj != MP_OBJ_NULL) {
+                MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
+                if (!mp_sched_num_pending()) {
+                    MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+                }
+                if (raise_exc) {
+                    MICROPY_END_ATOMIC_SECTION(atomic_state);
+                    nlr_raise(obj);
+                }
             }
+            mp_handle_pending_tail(atomic_state);
+        } else {
+            MICROPY_END_ATOMIC_SECTION(atomic_state);
         }
-        mp_handle_pending_tail(atomic_state);
     }
 }
 
-// This function should only be called be mp_sched_handle_pending,
+// This function should only be called by mp_handle_pending,
 // or by the VM's inlined version of that function.
 void mp_handle_pending_tail(mp_uint_t atomic_state) {
     MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;

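For context, mp_handle_pending() above is the consumer side of the
scheduler. The pending state it checks is set by the producer side,
typically a port calling mp_sched_schedule() to queue a Python callback.
A hedged sketch of that producer side follows; my_timer_irq and
callback_obj are hypothetical names, and whether it runs in an interrupt
handler or another thread depends on the port.

    #include "py/runtime.h"

    static mp_obj_t callback_obj;  // assumed to hold a Python callable

    void my_timer_irq(void) {
        // Queue callback_obj to be run later by the VM; this marks the
        // scheduler as pending, which mp_handle_pending() above consumes.
        if (!mp_sched_schedule(callback_obj, mp_const_none)) {
            // Scheduler queue was full; the callback was not queued this time.
        }
    }
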
py/vm.c

@@ -1366,18 +1366,23 @@ pending_exception_check:
                 #if MICROPY_ENABLE_SCHEDULER
                 // This is an inlined variant of mp_handle_pending
                 if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
-                    MARK_EXC_IP_SELECTIVE();
                     mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
-                    mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
-                    if (obj != MP_OBJ_NULL) {
-                        MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
-                        if (!mp_sched_num_pending()) {
-                            MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+                    // Re-check state is still pending now that we're in the atomic section.
+                    if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
+                        MARK_EXC_IP_SELECTIVE();
+                        mp_obj_t obj = MP_STATE_VM(mp_pending_exception);
+                        if (obj != MP_OBJ_NULL) {
+                            MP_STATE_VM(mp_pending_exception) = MP_OBJ_NULL;
+                            if (!mp_sched_num_pending()) {
+                                MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
+                            }
+                            MICROPY_END_ATOMIC_SECTION(atomic_state);
+                            RAISE(obj);
                         }
+                        mp_handle_pending_tail(atomic_state);
+                    } else {
                         MICROPY_END_ATOMIC_SECTION(atomic_state);
-                        RAISE(obj);
                     }
-                    mp_handle_pending_tail(atomic_state);
                 }
                 #else
                 // This is an inlined variant of mp_handle_pending