rp2/rp2_flash: Lock out second core only when doing flash erase/write.

Using the multicore lockout feature in the general atomic section makes the
atomic section much more difficult to get correct.

Signed-off-by: Damien George <damien@micropython.org>
Damien George 2024-01-02 01:03:13 +11:00
parent 3d0b6276f3
commit c3989e398f
2 changed files with 22 additions and 13 deletions
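In short, this commit splits two concerns that were previously tied together: the generic atomic section keeps providing mutual exclusion plus IRQ disabling, while flash erase/program gets a dedicated critical section that additionally suspends the other core. A minimal sketch of the resulting call pattern, assuming it lives in ports/rp2/rp2_flash.c next to the helpers added below (erase_one_sector is an illustrative name, not part of the commit):

// Sketch only: flash erase/program is now bracketed by the dedicated helpers
// instead of the generic MICROPY_BEGIN/END_ATOMIC_SECTION pair.
static void erase_one_sector(uint32_t flash_offset) {
    mp_uint_t atomic_state = begin_critical_flash_section(); // suspend other core (if running), disable IRQs
    flash_range_erase(flash_offset, FLASH_SECTOR_SIZE);      // XIP is unavailable while this runs
    end_critical_flash_section(atomic_state);                // restore IRQs, resume other core
}

The generic atomic section, in turn, no longer stalls the second core every time MICROPY_BEGIN_ATOMIC_SECTION is taken.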

ports/rp2/mpthreadport.c

@@ -52,9 +52,6 @@ uint32_t mp_thread_begin_atomic_section(void) {
         // When both cores are executing, we also need to provide
         // full mutual exclusion.
         mp_thread_mutex_lock(&atomic_mutex, 1);
-        // In case this atomic section is for flash access, then
-        // suspend the other core.
-        multicore_lockout_start_blocking();
     }
     return save_and_disable_interrupts();
@@ -64,7 +61,6 @@ void mp_thread_end_atomic_section(uint32_t state) {
     restore_interrupts(state);
     if (core1_entry) {
-        multicore_lockout_end_blocking();
         mp_thread_mutex_unlock(&atomic_mutex);
     }
 }
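For reference, the atomic-section helpers in this file reduce to roughly the following after the change (a reconstruction from the context lines above, assuming the same core1_entry guard that is visible in mp_thread_end_atomic_section):

// Reconstruction, not verbatim source: the generic atomic section now only
// takes the cross-core mutex and disables interrupts; it no longer suspends
// the other core.
uint32_t mp_thread_begin_atomic_section(void) {
    if (core1_entry) {
        // When both cores are executing, we also need to provide
        // full mutual exclusion.
        mp_thread_mutex_lock(&atomic_mutex, 1);
    }
    return save_and_disable_interrupts();
}

void mp_thread_end_atomic_section(uint32_t state) {
    restore_interrupts(state);
    if (core1_entry) {
        mp_thread_mutex_unlock(&atomic_mutex);
    }
}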

ports/rp2/rp2_flash.c

@@ -70,6 +70,22 @@ bi_decl(bi_block_device(
     BINARY_INFO_BLOCK_DEV_FLAG_WRITE |
     BINARY_INFO_BLOCK_DEV_FLAG_PT_UNKNOWN));
 
+// Flash erase and write must run with interrupts disabled and the other core suspended,
+// because the XIP bit gets disabled.
+static uint32_t begin_critical_flash_section(void) {
+    if (multicore_lockout_victim_is_initialized(1 - get_core_num())) {
+        multicore_lockout_start_blocking();
+    }
+    return save_and_disable_interrupts();
+}
+
+static void end_critical_flash_section(uint32_t state) {
+    restore_interrupts(state);
+    if (multicore_lockout_victim_is_initialized(1 - get_core_num())) {
+        multicore_lockout_end_blocking();
+    }
+}
+
 STATIC mp_obj_t rp2_flash_make_new(const mp_obj_type_t *type, size_t n_args, size_t n_kw, const mp_obj_t *all_args) {
     // Parse arguments
     enum { ARG_start, ARG_len };
@@ -135,19 +151,17 @@ STATIC mp_obj_t rp2_flash_writeblocks(size_t n_args, const mp_obj_t *args) {
     mp_buffer_info_t bufinfo;
     mp_get_buffer_raise(args[2], &bufinfo, MP_BUFFER_READ);
     if (n_args == 3) {
-        // Flash erase/program must run in an atomic section because the XIP bit gets disabled.
-        mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+        mp_uint_t atomic_state = begin_critical_flash_section();
         flash_range_erase(self->flash_base + offset, bufinfo.len);
-        MICROPY_END_ATOMIC_SECTION(atomic_state);
+        end_critical_flash_section(atomic_state);
         mp_event_handle_nowait();
         // TODO check return value
     } else {
         offset += mp_obj_get_int(args[3]);
     }
-    // Flash erase/program must run in an atomic section because the XIP bit gets disabled.
-    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+    mp_uint_t atomic_state = begin_critical_flash_section();
     flash_range_program(self->flash_base + offset, bufinfo.buf, bufinfo.len);
-    MICROPY_END_ATOMIC_SECTION(atomic_state);
+    end_critical_flash_section(atomic_state);
     mp_event_handle_nowait();
     // TODO check return value
     return mp_const_none;
@@ -170,10 +184,9 @@ STATIC mp_obj_t rp2_flash_ioctl(mp_obj_t self_in, mp_obj_t cmd_in, mp_obj_t arg_in) {
             return MP_OBJ_NEW_SMALL_INT(BLOCK_SIZE_BYTES);
         case MP_BLOCKDEV_IOCTL_BLOCK_ERASE: {
             uint32_t offset = mp_obj_get_int(arg_in) * BLOCK_SIZE_BYTES;
-            // Flash erase/program must run in an atomic section because the XIP bit gets disabled.
-            mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+            mp_uint_t atomic_state = begin_critical_flash_section();
             flash_range_erase(self->flash_base + offset, BLOCK_SIZE_BYTES);
-            MICROPY_END_ATOMIC_SECTION(atomic_state);
+            end_critical_flash_section(atomic_state);
             // TODO check return value
             return MP_OBJ_NEW_SMALL_INT(0);
         }
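A note on the multicore_lockout_victim_is_initialized() guard: multicore_lockout_start_blocking() only returns once the other core has parked itself in the lockout handler, which requires that core to have registered as a lockout victim beforehand. The check therefore keeps begin_critical_flash_section() from blocking forever when the second core has never been started. A rough sketch of the pico-sdk handshake this relies on, with an illustrative core1_main (in MicroPython the rp2 port's own core-1 startup is assumed to perform the victim registration, not user code):

#include "pico/multicore.h"

// Illustrative only: how a second core registers as a lockout "victim" so that
// multicore_lockout_start_blocking() on the other core can pause it safely.
static void core1_main(void) {
    multicore_lockout_victim_init();  // opt in to being paused by the other core
    for (;;) {
        // ... normal core-1 work; it is transparently held here while core 0
        // performs a flash erase/program inside the critical section ...
    }
}

// On core 0: multicore_launch_core1(core1_main); after that,
// multicore_lockout_victim_is_initialized(1) returns true and the flash
// helpers above will actually suspend core 1.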