diff --git a/py/asmarm.c b/py/asmarm.c
index 72b37f73a..e91421578 100644
--- a/py/asmarm.c
+++ b/py/asmarm.c
@@ -303,6 +303,11 @@ void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs) {
     emit_al(as, 0x1a00010 | (rd << 12) | (rs << 8) | rd);
 }
 
+void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
+    // mov rd, rd, lsr rs
+    emit_al(as, 0x1a00030 | (rd << 12) | (rs << 8) | rd);
+}
+
 void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs) {
     // mov rd, rd, asr rs
     emit_al(as, 0x1a00050 | (rd << 12) | (rs << 8) | rd);
diff --git a/py/asmarm.h b/py/asmarm.h
index 825fd8840..46da661fa 100644
--- a/py/asmarm.h
+++ b/py/asmarm.h
@@ -101,6 +101,7 @@ void asm_arm_orr_reg_reg_reg(asm_arm_t *as, uint rd, uint rn, uint rm);
 void asm_arm_mov_reg_local_addr(asm_arm_t *as, uint rd, int local_num);
 void asm_arm_mov_reg_pcrel(asm_arm_t *as, uint reg_dest, uint label);
 void asm_arm_lsl_reg_reg(asm_arm_t *as, uint rd, uint rs);
+void asm_arm_lsr_reg_reg(asm_arm_t *as, uint rd, uint rs);
 void asm_arm_asr_reg_reg(asm_arm_t *as, uint rd, uint rs);
 
 // memory
@@ -187,6 +188,7 @@ void asm_arm_bx_reg(asm_arm_t *as, uint reg_src);
 #define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_arm_mov_reg_pcrel((as), (reg_dest), (label))
 
 #define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_arm_lsl_reg_reg((as), (reg_dest), (reg_shift))
+#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) asm_arm_lsr_reg_reg((as), (reg_dest), (reg_shift))
 #define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_arm_asr_reg_reg((as), (reg_dest), (reg_shift))
 #define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_arm_orr_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
 #define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_arm_eor_reg_reg_reg((as), (reg_dest), (reg_dest), (reg_src))
diff --git a/py/asmthumb.h b/py/asmthumb.h
index 6d0ee4b76..9e6f242cc 100644
--- a/py/asmthumb.h
+++ b/py/asmthumb.h
@@ -345,6 +345,7 @@ void asm_thumb_bl_ind(asm_thumb_t *as, uint fun_id, uint reg_temp); // convenien
 #define ASM_MOV_REG_PCREL(as, rlo_dest, label) asm_thumb_mov_reg_pcrel((as), (rlo_dest), (label))
 
 #define ASM_LSL_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSL, (reg_dest), (reg_shift))
+#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_LSR, (reg_dest), (reg_shift))
 #define ASM_ASR_REG_REG(as, reg_dest, reg_shift) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ASR, (reg_dest), (reg_shift))
 #define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_ORR, (reg_dest), (reg_src))
 #define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_thumb_format_4((as), ASM_THUMB_FORMAT_4_EOR, (reg_dest), (reg_src))
diff --git a/py/asmx64.c b/py/asmx64.c
index 723671d5a..fd64eaf98 100644
--- a/py/asmx64.c
+++ b/py/asmx64.c
@@ -67,6 +67,7 @@
 // #define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
 // #define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
 #define OPCODE_SHL_RM64_CL (0xd3) /* /4 */
+#define OPCODE_SHR_RM64_CL (0xd3) /* /5 */
 #define OPCODE_SAR_RM64_CL (0xd3) /* /7 */
 // #define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
 // #define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
@@ -382,6 +383,10 @@ void asm_x64_shl_r64_cl(asm_x64_t *as, int dest_r64) {
     asm_x64_generic_r64_r64(as, dest_r64, 4, OPCODE_SHL_RM64_CL);
 }
 
+void asm_x64_shr_r64_cl(asm_x64_t *as, int dest_r64) {
+    asm_x64_generic_r64_r64(as, dest_r64, 5, OPCODE_SHR_RM64_CL);
+}
+
 void asm_x64_sar_r64_cl(asm_x64_t *as, int dest_r64) {
     asm_x64_generic_r64_r64(as, dest_r64, 7, OPCODE_SAR_RM64_CL);
 }
diff --git a/py/asmx64.h b/py/asmx64.h
index 28b1bd255..e73e3c5c5 100644
--- a/py/asmx64.h
+++ b/py/asmx64.h
@@ -98,6 +98,7 @@ void asm_x64_and_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
 void asm_x64_or_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
 void asm_x64_xor_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
 void asm_x64_shl_r64_cl(asm_x64_t *as, int dest_r64);
+void asm_x64_shr_r64_cl(asm_x64_t *as, int dest_r64);
 void asm_x64_sar_r64_cl(asm_x64_t *as, int dest_r64);
 void asm_x64_add_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
 void asm_x64_sub_r64_r64(asm_x64_t *as, int dest_r64, int src_r64);
@@ -190,6 +191,7 @@ void asm_x64_call_ind(asm_x64_t *as, size_t fun_id, int temp_r32);
 #define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x64_mov_reg_pcrel((as), (reg_dest), (label))
 
 #define ASM_LSL_REG(as, reg) asm_x64_shl_r64_cl((as), (reg))
+#define ASM_LSR_REG(as, reg) asm_x64_shr_r64_cl((as), (reg))
 #define ASM_ASR_REG(as, reg) asm_x64_sar_r64_cl((as), (reg))
 #define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x64_or_r64_r64((as), (reg_dest), (reg_src))
 #define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x64_xor_r64_r64((as), (reg_dest), (reg_src))
diff --git a/py/asmx86.c b/py/asmx86.c
index 96de372ae..4b0f8047f 100644
--- a/py/asmx86.c
+++ b/py/asmx86.c
@@ -67,6 +67,7 @@
 // #define OPCODE_SHR_RM32_BY_I8 (0xc1) /* /5 */
 // #define OPCODE_SAR_RM32_BY_I8 (0xc1) /* /7 */
 #define OPCODE_SHL_RM32_CL (0xd3) /* /4 */
+#define OPCODE_SHR_RM32_CL (0xd3) /* /5 */
 #define OPCODE_SAR_RM32_CL (0xd3) /* /7 */
 // #define OPCODE_CMP_I32_WITH_RM32 (0x81) /* /7 */
 // #define OPCODE_CMP_I8_WITH_RM32 (0x83) /* /7 */
@@ -259,6 +260,10 @@ void asm_x86_shl_r32_cl(asm_x86_t *as, int dest_r32) {
     asm_x86_generic_r32_r32(as, dest_r32, 4, OPCODE_SHL_RM32_CL);
 }
 
+void asm_x86_shr_r32_cl(asm_x86_t *as, int dest_r32) {
+    asm_x86_generic_r32_r32(as, dest_r32, 5, OPCODE_SHR_RM32_CL);
+}
+
 void asm_x86_sar_r32_cl(asm_x86_t *as, int dest_r32) {
     asm_x86_generic_r32_r32(as, dest_r32, 7, OPCODE_SAR_RM32_CL);
 }
diff --git a/py/asmx86.h b/py/asmx86.h
index 4855cd7ee..f28040abf 100644
--- a/py/asmx86.h
+++ b/py/asmx86.h
@@ -93,6 +93,7 @@ void asm_x86_and_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
 void asm_x86_or_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
 void asm_x86_xor_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
 void asm_x86_shl_r32_cl(asm_x86_t *as, int dest_r32);
+void asm_x86_shr_r32_cl(asm_x86_t *as, int dest_r32);
 void asm_x86_sar_r32_cl(asm_x86_t *as, int dest_r32);
 void asm_x86_add_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
 void asm_x86_sub_r32_r32(asm_x86_t *as, int dest_r32, int src_r32);
@@ -185,6 +186,7 @@ void asm_x86_call_ind(asm_x86_t *as, size_t fun_id, mp_uint_t n_args, int temp_r
 #define ASM_MOV_REG_PCREL(as, reg_dest, label) asm_x86_mov_reg_pcrel((as), (reg_dest), (label))
 
 #define ASM_LSL_REG(as, reg) asm_x86_shl_r32_cl((as), (reg))
+#define ASM_LSR_REG(as, reg) asm_x86_shr_r32_cl((as), (reg))
 #define ASM_ASR_REG(as, reg) asm_x86_sar_r32_cl((as), (reg))
 #define ASM_OR_REG_REG(as, reg_dest, reg_src) asm_x86_or_r32_r32((as), (reg_dest), (reg_src))
 #define ASM_XOR_REG_REG(as, reg_dest, reg_src) asm_x86_xor_r32_r32((as), (reg_dest), (reg_src))
diff --git a/py/asmxtensa.h b/py/asmxtensa.h
index 5eb40daf7..43f1b608e 100644
--- a/py/asmxtensa.h
+++ b/py/asmxtensa.h
@@ -243,6 +243,10 @@ static inline void asm_xtensa_op_sll(asm_xtensa_t *as, uint reg_dest, uint reg_s
     asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 10, reg_dest, reg_src, 0));
 }
 
+static inline void asm_xtensa_op_srl(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
+    asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 9, reg_dest, 0, reg_src));
+}
+
 static inline void asm_xtensa_op_sra(asm_xtensa_t *as, uint reg_dest, uint reg_src) {
     asm_xtensa_op24(as, ASM_XTENSA_ENCODE_RRR(0, 1, 11, reg_dest, 0, reg_src));
 }
@@ -372,6 +376,11 @@ void asm_xtensa_call_ind_win(asm_xtensa_t *as, uint idx);
         asm_xtensa_op_ssl((as), (reg_shift)); \
         asm_xtensa_op_sll((as), (reg_dest), (reg_dest)); \
     } while (0)
+#define ASM_LSR_REG_REG(as, reg_dest, reg_shift) \
+    do { \
+        asm_xtensa_op_ssr((as), (reg_shift)); \
+        asm_xtensa_op_srl((as), (reg_dest), (reg_dest)); \
+    } while (0)
 #define ASM_ASR_REG_REG(as, reg_dest, reg_shift) \
     do { \
         asm_xtensa_op_ssr((as), (reg_shift)); \
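
Not part of the patch: a minimal stand-alone C sketch that sanity-checks two of the encodings used above, the ARM "mov rd, rd, lsr rs" word built by asm_arm_lsr_reg_reg and the x86-64 "shr r64, cl" byte sequence selected by OPCODE_SHR_RM64_CL with ModRM reg field /5. The helper names arm_lsr_reg_reg and x64_shr_r64_cl and the test register values are illustrative assumptions, and the x86-64 helper covers only the low eight registers (r8..r15, which need REX.B, are not handled).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

// ARM data-processing MOV with register-specified LSR shift:
// 0x01a00030 | (rd << 12) | (rs << 8) | rd, plus the AL condition in the top nibble.
static uint32_t arm_lsr_reg_reg(uint32_t rd, uint32_t rs) {
    return 0xe1a00030u | (rd << 12) | (rs << 8) | rd;
}

// x86-64 "shr r64, cl": REX.W prefix, shift-group opcode 0xd3, ModRM with reg=/5.
// Illustrative low-register-only version; REX.B for extended registers is omitted.
static void x64_shr_r64_cl(uint8_t out[3], uint32_t dest_r64) {
    out[0] = 0x48;                             // REX.W: 64-bit operand size
    out[1] = 0xd3;                             // shift by CL opcode group
    out[2] = 0xc0 | (5 << 3) | (dest_r64 & 7); // ModRM: mod=11, reg=/5 (SHR), rm=dest
}

int main(void) {
    // mov r0, r0, lsr r1
    assert(arm_lsr_reg_reg(0, 1) == 0xe1a00130u);

    // shr rax, cl -> 48 d3 e8
    uint8_t buf[3];
    x64_shr_r64_cl(buf, 0);
    assert(buf[0] == 0x48 && buf[1] == 0xd3 && buf[2] == 0xe8);

    printf("shift encodings match\n");
    return 0;
}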