py: Don't use anonymous unions, name them instead.

This makes the code (more) compatible with the C99 standard.
Damien George 2015-01-24 23:14:12 +00:00
parent 5c670acb1f
commit 32444b759a
3 changed files with 50 additions and 48 deletions
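A minimal sketch of the difference (illustrative only; the struct and member names below are placeholders, not the MicroPython definitions): an anonymous union inside a struct is a C11 feature, accepted by older compilers only as a GNU/MSVC extension, while naming the union member keeps the declaration within C99 at the cost of one extra component in every access path.

/* Anonymous union: valid C11 (6.7.2.1), but only an extension for C99 compilers. */
struct with_anonymous_union {
    int kind;
    union {
        struct { char *code; unsigned len; } u_byte;
        struct { void *fun_data; unsigned type_sig; } u_native;
    };                               /* no member name */
};

/* Named union member: strictly conforming C99. */
struct with_named_union {
    int kind;
    union {
        struct { char *code; unsigned len; } u_byte;
        struct { void *fun_data; unsigned type_sig; } u_native;
    } data;                          /* the union is now the member "data" */
};

/* Accesses gain one path component, e.g. rc->u_byte.code becomes rc->data.u_byte.code. */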

py/emitglue.c

@@ -44,6 +44,23 @@
#define DEBUG_OP_printf(...) (void)0
#endif
struct _mp_raw_code_t {
mp_raw_code_kind_t kind : 3;
mp_uint_t scope_flags : 7;
mp_uint_t n_pos_args : 11;
mp_uint_t n_kwonly_args : 11;
union {
struct {
byte *code;
mp_uint_t len;
} u_byte;
struct {
void *fun_data;
mp_uint_t type_sig; // for viper, compressed as 2-bit types; ret is MSB, then arg0, arg1, etc
} u_native;
} data;
};
mp_raw_code_t *mp_emit_glue_new_raw_code(void) {
mp_raw_code_t *rc = m_new0(mp_raw_code_t, 1);
rc->kind = MP_CODE_RESERVED;
@@ -55,8 +72,8 @@ void mp_emit_glue_assign_bytecode(mp_raw_code_t *rc, byte *code, mp_uint_t len,
rc->scope_flags = scope_flags;
rc->n_pos_args = n_pos_args;
rc->n_kwonly_args = n_kwonly_args;
rc->u_byte.code = code;
rc->u_byte.len = len;
rc->data.u_byte.code = code;
rc->data.u_byte.len = len;
#ifdef DEBUG_PRINT
DEBUG_printf("assign byte code: code=%p len=" UINT_FMT " n_pos_args=" UINT_FMT " n_kwonly_args=" UINT_FMT " flags=%x\n", code, len, n_pos_args, n_kwonly_args, (uint)scope_flags);
@@ -74,8 +91,8 @@ void mp_emit_glue_assign_native(mp_raw_code_t *rc, mp_raw_code_kind_t kind, void
rc->kind = kind;
rc->scope_flags = 0;
rc->n_pos_args = n_args;
rc->u_native.fun_data = fun_data;
rc->u_native.type_sig = type_sig;
rc->data.u_native.fun_data = fun_data;
rc->data.u_native.type_sig = type_sig;
#ifdef DEBUG_PRINT
DEBUG_printf("assign native: kind=%d fun=%p len=" UINT_FMT " n_args=" UINT_FMT "\n", kind, fun_data, fun_len, n_args);
@@ -113,19 +130,19 @@ mp_obj_t mp_make_function_from_raw_code(mp_raw_code_t *rc, mp_obj_t def_args, mp
switch (rc->kind) {
case MP_CODE_BYTECODE:
no_other_choice:
fun = mp_obj_new_fun_bc(rc->scope_flags, rc->n_pos_args, rc->n_kwonly_args, def_args, def_kw_args, rc->u_byte.code);
fun = mp_obj_new_fun_bc(rc->scope_flags, rc->n_pos_args, rc->n_kwonly_args, def_args, def_kw_args, rc->data.u_byte.code);
break;
#if MICROPY_EMIT_NATIVE
case MP_CODE_NATIVE_PY:
fun = mp_obj_new_fun_native(rc->n_pos_args, rc->u_native.fun_data);
fun = mp_obj_new_fun_native(rc->n_pos_args, rc->data.u_native.fun_data);
break;
case MP_CODE_NATIVE_VIPER:
fun = mp_obj_new_fun_viper(rc->n_pos_args, rc->u_native.fun_data, rc->u_native.type_sig);
fun = mp_obj_new_fun_viper(rc->n_pos_args, rc->data.u_native.fun_data, rc->data.u_native.type_sig);
break;
#endif
#if MICROPY_EMIT_INLINE_THUMB
case MP_CODE_NATIVE_ASM:
fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->u_native.fun_data);
fun = mp_obj_new_fun_asm(rc->n_pos_args, rc->data.u_native.fun_data);
break;
#endif
default:

py/emitglue.h

@@ -39,22 +39,7 @@ typedef enum {
MP_CODE_NATIVE_ASM,
} mp_raw_code_kind_t;
typedef struct _mp_raw_code_t {
mp_raw_code_kind_t kind : 3;
mp_uint_t scope_flags : 7;
mp_uint_t n_pos_args : 11;
mp_uint_t n_kwonly_args : 11;
union {
struct {
byte *code;
mp_uint_t len;
} u_byte;
struct {
void *fun_data;
mp_uint_t type_sig; // for viper, compressed as 2-bit types; ret is MSB, then arg0, arg1, etc
} u_native;
};
} mp_raw_code_t;
typedef struct _mp_raw_code_t mp_raw_code_t;
mp_raw_code_t *mp_emit_glue_new_raw_code(void);

py/emitnative.c

@@ -480,7 +480,7 @@ typedef struct _stack_info_t {
union {
int u_reg;
mp_int_t u_imm;
};
} data;
} stack_info_t;
struct _emit_t {
@@ -712,7 +712,7 @@ STATIC void adjust_stack(emit_t *emit, mp_int_t stack_size_delta) {
DEBUG_printf(" adjust_stack; stack_size=%d+%d; stack now:", emit->stack_size - stack_size_delta, stack_size_delta);
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
DEBUG_printf(" (v=%d k=%d %d)", si->vtype, si->kind, si->u_reg);
DEBUG_printf(" (v=%d k=%d %d)", si->vtype, si->kind, si->data.u_reg);
}
DEBUG_printf("\n");
#endif
@@ -765,7 +765,7 @@ STATIC void emit_native_pre(emit_t *emit) {
case STACK_REG:
// TODO only push reg if in regs_needed
emit->stack_info[i].kind = STACK_VALUE;
ASM_MOV_REG_TO_LOCAL(emit->as, emit->stack_info[i].u_reg, emit->stack_start + i);
ASM_MOV_REG_TO_LOCAL(emit->as, emit->stack_info[i].data.u_reg, emit->stack_start + i);
break;
case STACK_IMM:
@@ -795,9 +795,9 @@ STATIC void need_reg_single(emit_t *emit, int reg_needed, int skip_stack_pos) {
for (int i = 0; i < emit->stack_size; i++) {
if (i != skip_stack_pos) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_REG && si->u_reg == reg_needed) {
if (si->kind == STACK_REG && si->data.u_reg == reg_needed) {
si->kind = STACK_VALUE;
ASM_MOV_REG_TO_LOCAL(emit->as, si->u_reg, emit->stack_start + i);
ASM_MOV_REG_TO_LOCAL(emit->as, si->data.u_reg, emit->stack_start + i);
}
}
}
@@ -808,7 +808,7 @@ STATIC void need_reg_all(emit_t *emit) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_REG) {
si->kind = STACK_VALUE;
ASM_MOV_REG_TO_LOCAL(emit->as, si->u_reg, emit->stack_start + i);
ASM_MOV_REG_TO_LOCAL(emit->as, si->data.u_reg, emit->stack_start + i);
}
}
}
@@ -818,17 +818,17 @@ STATIC void need_stack_settled(emit_t *emit) {
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_REG) {
DEBUG_printf(" reg(%u) to local(%u)\n", si->u_reg, emit->stack_start + i);
DEBUG_printf(" reg(%u) to local(%u)\n", si->data.u_reg, emit->stack_start + i);
si->kind = STACK_VALUE;
ASM_MOV_REG_TO_LOCAL(emit->as, si->u_reg, emit->stack_start + i);
ASM_MOV_REG_TO_LOCAL(emit->as, si->data.u_reg, emit->stack_start + i);
}
}
for (int i = 0; i < emit->stack_size; i++) {
stack_info_t *si = &emit->stack_info[i];
if (si->kind == STACK_IMM) {
DEBUG_printf(" imm(" INT_FMT ") to local(%u)\n", si->u_imm, emit->stack_start + i);
DEBUG_printf(" imm(" INT_FMT ") to local(%u)\n", si->data.u_imm, emit->stack_start + i);
si->kind = STACK_VALUE;
ASM_MOV_IMM_TO_LOCAL_USING(emit->as, si->u_imm, emit->stack_start + i, REG_TEMP0);
ASM_MOV_IMM_TO_LOCAL_USING(emit->as, si->data.u_imm, emit->stack_start + i, REG_TEMP0);
}
}
}
@@ -844,13 +844,13 @@ STATIC void emit_access_stack(emit_t *emit, int pos, vtype_kind_t *vtype, int re
break;
case STACK_REG:
if (si->u_reg != reg_dest) {
ASM_MOV_REG_REG(emit->as, reg_dest, si->u_reg);
if (si->data.u_reg != reg_dest) {
ASM_MOV_REG_REG(emit->as, reg_dest, si->data.u_reg);
}
break;
case STACK_IMM:
ASM_MOV_IMM_TO_REG(emit->as, si->u_imm, reg_dest);
ASM_MOV_IMM_TO_REG(emit->as, si->data.u_imm, reg_dest);
break;
}
}
@@ -864,7 +864,7 @@ STATIC void emit_fold_stack_top(emit_t *emit, int reg_dest) {
// if folded element was on the stack we need to put it in a register
ASM_MOV_LOCAL_TO_REG(emit->as, emit->stack_start + emit->stack_size - 1, reg_dest);
si->kind = STACK_REG;
si->u_reg = reg_dest;
si->data.u_reg = reg_dest;
}
adjust_stack(emit, -1);
}
@@ -874,9 +874,9 @@ STATIC void emit_fold_stack_top(emit_t *emit, int reg_dest) {
STATIC void emit_pre_pop_reg_flexible(emit_t *emit, vtype_kind_t *vtype, int *reg_dest, int not_r1, int not_r2) {
emit->last_emit_was_return_value = false;
stack_info_t *si = peek_stack(emit, 0);
if (si->kind == STACK_REG && si->u_reg != not_r1 && si->u_reg != not_r2) {
if (si->kind == STACK_REG && si->data.u_reg != not_r1 && si->data.u_reg != not_r2) {
*vtype = si->vtype;
*reg_dest = si->u_reg;
*reg_dest = si->data.u_reg;
need_reg_single(emit, *reg_dest, 1);
} else {
emit_access_stack(emit, 1, vtype, *reg_dest);
@@ -919,7 +919,7 @@ STATIC void emit_post_push_reg(emit_t *emit, vtype_kind_t vtype, int reg) {
stack_info_t *si = &emit->stack_info[emit->stack_size];
si->vtype = vtype;
si->kind = STACK_REG;
si->u_reg = reg;
si->data.u_reg = reg;
adjust_stack(emit, 1);
}
@@ -927,7 +927,7 @@ STATIC void emit_post_push_imm(emit_t *emit, vtype_kind_t vtype, mp_int_t imm) {
stack_info_t *si = &emit->stack_info[emit->stack_size];
si->vtype = vtype;
si->kind = STACK_IMM;
si->u_imm = imm;
si->data.u_imm = imm;
adjust_stack(emit, 1);
}
@@ -999,10 +999,10 @@ STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_de
si->kind = STACK_VALUE;
switch (si->vtype) {
case VTYPE_PYOBJ:
ASM_MOV_IMM_TO_LOCAL_USING(emit->as, si->u_imm, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
ASM_MOV_IMM_TO_LOCAL_USING(emit->as, si->data.u_imm, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
break;
case VTYPE_BOOL:
if (si->u_imm == 0) {
if (si->data.u_imm == 0) {
ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (mp_uint_t)mp_const_false, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
} else {
ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (mp_uint_t)mp_const_true, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
@@ -1011,7 +1011,7 @@ STATIC void emit_get_stack_pointer_to_reg_for_pop(emit_t *emit, mp_uint_t reg_de
break;
case VTYPE_INT:
case VTYPE_UINT:
ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (si->u_imm << 1) | 1, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
ASM_MOV_IMM_TO_LOCAL_USING(emit->as, (si->data.u_imm << 1) | 1, emit->stack_start + emit->stack_size - 1 - i, reg_dest);
si->vtype = VTYPE_PYOBJ;
break;
default:
@@ -1332,7 +1332,7 @@ STATIC void emit_native_load_subscr(emit_t *emit) {
stack_info_t *top = peek_stack(emit, 0);
if (top->vtype == VTYPE_INT && top->kind == STACK_IMM) {
// index is an immediate
mp_int_t index_value = top->u_imm;
mp_int_t index_value = top->data.u_imm;
emit_pre_pop_discard(emit); // discard index
int reg_base = REG_ARG_1;
int reg_index = REG_ARG_2;
@@ -1531,7 +1531,7 @@ STATIC void emit_native_store_subscr(emit_t *emit) {
stack_info_t *top = peek_stack(emit, 0);
if (top->vtype == VTYPE_INT && top->kind == STACK_IMM) {
// index is an immediate
mp_int_t index_value = top->u_imm;
mp_int_t index_value = top->data.u_imm;
emit_pre_pop_discard(emit); // discard index
vtype_kind_t vtype_value;
int reg_base = REG_ARG_1;
@@ -2189,7 +2189,7 @@ STATIC void emit_native_call_function(emit_t *emit, mp_uint_t n_positional, mp_u
// casting operator
assert(n_positional == 1 && n_keyword == 0);
DEBUG_printf(" cast to %d\n", vtype_fun);
vtype_kind_t vtype_cast = peek_stack(emit, 1)->u_imm;
vtype_kind_t vtype_cast = peek_stack(emit, 1)->data.u_imm;
switch (peek_vtype(emit, 0)) {
case VTYPE_PYOBJ: {
vtype_kind_t vtype;