From 819b1988a096d88959aee0f4658a593ca16e4ce7 Mon Sep 17 00:00:00 2001 From: Dmitry Stogov Date: Tue, 25 Feb 2025 02:23:05 +0300 Subject: [PATCH] Update IR IR commit: ca93e781eaf6b0949690d3df272ecf44528ff4a8 --- ext/opcache/jit/ir/ir.c | 148 ++++++++------------- ext/opcache/jit/ir/ir_builder.h | 2 +- ext/opcache/jit/ir/ir_private.h | 3 + ext/opcache/jit/ir/ir_sccp.c | 225 +++++++++++++++++++++++++++++--- ext/opcache/jit/ir/ir_x86.dasc | 38 ++++-- 5 files changed, 291 insertions(+), 125 deletions(-) diff --git a/ext/opcache/jit/ir/ir.c b/ext/opcache/jit/ir/ir.c index cf0443bbddc..a6bb7c993c5 100644 --- a/ext/opcache/jit/ir/ir.c +++ b/ext/opcache/jit/ir/ir.c @@ -2407,7 +2407,7 @@ void _ir_BEGIN(ir_ctx *ctx, ir_ref src) } } -ir_ref _ir_fold_condition(ir_ctx *ctx, ir_ref ref) +static ir_ref _ir_fold_condition(ir_ctx *ctx, ir_ref ref) { ir_insn *insn = &ctx->ir_base[ref]; @@ -2415,12 +2415,59 @@ ir_ref _ir_fold_condition(ir_ctx *ctx, ir_ref ref) ir_insn *op2_insn = &ctx->ir_base[insn->op2]; if (IR_IS_TYPE_INT(op2_insn->type) && op2_insn->val.u64 == 0) { - return insn->op1; + ref = insn->op1; + insn = &ctx->ir_base[ref]; } + } else if (insn->op == IR_EQ && insn->op2 == IR_TRUE) { + ref = insn->op1; + insn = &ctx->ir_base[ref]; } +// while (insn->op == IR_SEXT || insn->op == IR_ZEXT || insn->op == IR_BITCAST) { +// ref = insn->op1; +// insn = &ctx->ir_base[ref]; +// } return ref; } +IR_ALWAYS_INLINE ir_ref ir_check_dominating_predicates_i(ir_ctx *ctx, ir_ref ref, ir_ref condition, ir_ref limit) +{ + ir_insn *prev = NULL; + ir_insn *insn; + + while (ref > limit) { + insn = &ctx->ir_base[ref]; + if (insn->op == IR_GUARD_NOT) { + if (insn->op2 == condition) { + return IR_FALSE; + } + } else if (insn->op == IR_GUARD) { + if (insn->op2 == condition) { + return IR_TRUE; + } + } else if (insn->op == IR_IF) { + if (insn->op2 == condition) { + if (prev->op == IR_IF_TRUE) { + return IR_TRUE; + } else if (prev->op == IR_IF_FALSE) { + return IR_FALSE; + } + } + } else if (insn->op 
== IR_START || insn->op == IR_MERGE || insn->op == IR_LOOP_BEGIN) { + break; + } + prev = insn; + ref = insn->op1; + } + + return condition; +} + +ir_ref ir_check_dominating_predicates(ir_ctx *ctx, ir_ref ref, ir_ref condition) +{ + IR_ASSERT(!IR_IS_CONST_REF(condition)); + return ir_check_dominating_predicates_i(ctx, ref, condition, (condition < ref) ? condition : 1); +} + ir_ref _ir_IF(ir_ctx *ctx, ir_ref condition) { ir_ref if_ref; @@ -2436,38 +2483,7 @@ ir_ref _ir_IF(ir_ctx *ctx, ir_ref condition) if (IR_IS_CONST_REF(condition)) { condition = ir_ref_is_true(ctx, condition) ? IR_TRUE : IR_FALSE; } else { - ir_insn *prev = NULL; - ir_ref ref = ctx->control; - ir_insn *insn; - - while (ref > condition) { - insn = &ctx->ir_base[ref]; - if (insn->op == IR_GUARD_NOT) { - if (insn->op2 == condition) { - condition = IR_FALSE; - break; - } - } else if (insn->op == IR_GUARD) { - if (insn->op2 == condition) { - condition = IR_TRUE; - break; - } - } else if (insn->op == IR_IF) { - if (insn->op2 == condition) { - if (prev->op == IR_IF_TRUE) { - condition = IR_TRUE; - break; - } else if (prev->op == IR_IF_FALSE) { - condition = IR_FALSE; - break; - } - } - } else if (insn->op == IR_START || insn->op == IR_MERGE || insn->op == IR_LOOP_BEGIN) { - break; - } - prev = insn; - ref = insn->op1; - } + condition = ir_check_dominating_predicates_i(ctx, ctx->control, condition, condition); } if_ref = ir_emit2(ctx, IR_IF, ctx->control, condition); ctx->control = IR_UNUSED; @@ -2986,35 +3002,9 @@ void _ir_GUARD(ir_ctx *ctx, ir_ref condition, ir_ref addr) } condition = IR_FALSE; } else if (EXPECTED(ctx->flags & IR_OPT_FOLDING)) { - ir_insn *prev = NULL; - ir_ref ref = ctx->control; - ir_insn *insn; - - while (ref > condition) { - insn = &ctx->ir_base[ref]; - if (insn->op == IR_GUARD) { - if (insn->op2 == condition) { - return; - } - } else if (insn->op == IR_GUARD_NOT) { - if (insn->op2 == condition) { - condition = IR_FALSE; - break; - } - } else if (insn->op == IR_IF) { - if (insn->op2 
== condition) { - if (prev->op == IR_IF_TRUE) { - return; - } else if (prev->op == IR_IF_FALSE) { - condition = IR_FALSE; - break; - } - } - } else if (insn->op == IR_START || insn->op == IR_MERGE || insn->op == IR_LOOP_BEGIN) { - break; - } - prev = insn; - ref = insn->op1; + condition = ir_check_dominating_predicates_i(ctx, ctx->control, condition, condition); + if (condition == IR_TRUE) { + return; } } if (ctx->snapshot_create) { @@ -3032,35 +3022,9 @@ void _ir_GUARD_NOT(ir_ctx *ctx, ir_ref condition, ir_ref addr) } condition = IR_TRUE; } else if (EXPECTED(ctx->flags & IR_OPT_FOLDING)) { - ir_insn *prev = NULL; - ir_ref ref = ctx->control; - ir_insn *insn; - - while (ref > condition) { - insn = &ctx->ir_base[ref]; - if (insn->op == IR_GUARD_NOT) { - if (insn->op2 == condition) { - return; - } - } else if (insn->op == IR_GUARD) { - if (insn->op2 == condition) { - condition = IR_TRUE; - break; - } - } else if (insn->op == IR_IF) { - if (insn->op2 == condition) { - if (prev->op == IR_IF_TRUE) { - condition = IR_TRUE; - break; - } else if (prev->op == IR_IF_FALSE) { - return; - } - } - } else if (insn->op == IR_START || insn->op == IR_MERGE || insn->op == IR_LOOP_BEGIN) { - break; - } - prev = insn; - ref = insn->op1; + condition = ir_check_dominating_predicates_i(ctx, ctx->control, condition, condition); + if (condition == IR_FALSE) { + return; } } if (ctx->snapshot_create) { diff --git a/ext/opcache/jit/ir/ir_builder.h b/ext/opcache/jit/ir/ir_builder.h index 208c1ae4c81..4e4ea53683a 100644 --- a/ext/opcache/jit/ir/ir_builder.h +++ b/ext/opcache/jit/ir/ir_builder.h @@ -528,7 +528,7 @@ extern "C" { #define ir_ALLOCA(_size) _ir_ALLOCA(_ir_CTX, (_size)) #define ir_AFREE(_size) _ir_AFREE(_ir_CTX, (_size)) -#define ir_VADDR(_var) ir_emit1(_ir_CTX, IR_OPT(IR_VADDR, IR_ADDR), (_var)) +#define ir_VADDR(_var) ir_fold1(_ir_CTX, IR_OPT(IR_VADDR, IR_ADDR), (_var)) #define ir_VLOAD(_type, _var) _ir_VLOAD(_ir_CTX, (_type), (_var)) #define ir_VLOAD_B(_var) _ir_VLOAD(_ir_CTX, 
IR_BOOL, (_var)) #define ir_VLOAD_U8(_var) _ir_VLOAD(_ir_CTX, IR_U8, (_var)) diff --git a/ext/opcache/jit/ir/ir_private.h b/ext/opcache/jit/ir/ir_private.h index 7231242a6cf..9c69d6074de 100644 --- a/ext/opcache/jit/ir/ir_private.h +++ b/ext/opcache/jit/ir/ir_private.h @@ -1182,6 +1182,9 @@ ir_ref ir_find_aliasing_vload(ir_ctx *ctx, ir_ref ref, ir_type type, ir_ref var) ir_ref ir_find_aliasing_store(ir_ctx *ctx, ir_ref ref, ir_ref addr, ir_ref val); ir_ref ir_find_aliasing_vstore(ir_ctx *ctx, ir_ref ref, ir_ref addr, ir_ref val); +/*** Predicates (see ir.c) ***/ +ir_ref ir_check_dominating_predicates(ir_ctx *ctx, ir_ref ref, ir_ref condition); + /*** IR Live Info ***/ typedef ir_ref ir_live_pos; typedef struct _ir_use_pos ir_use_pos; diff --git a/ext/opcache/jit/ir/ir_sccp.c b/ext/opcache/jit/ir/ir_sccp.c index 0c69b530b02..18192d7f179 100644 --- a/ext/opcache/jit/ir/ir_sccp.c +++ b/ext/opcache/jit/ir/ir_sccp.c @@ -656,6 +656,7 @@ static IR_NEVER_INLINE void ir_sccp_analyze(ir_ctx *ctx, ir_insn *_values, ir_bi } } IR_MAKE_BOTTOM(i); + ir_bitqueue_add(iter_worklist, i); } else if (insn->op == IR_SWITCH) { if (IR_IS_TOP(insn->op2)) { ir_sccp_add_input(ctx, _values, worklist, insn->op2); @@ -1144,6 +1145,35 @@ static IR_NEVER_INLINE void ir_sccp_transform(ir_ctx *ctx, ir_insn *_values, ir_ /* Iterative Optimizations */ /***************************/ +/* Modification of some instruction may open new optimization opportunities for other + * instructions that use this one. + * + * For example, let "a = ADD(x, y)" become "a = ADD(x, C1)". In case we also have + * "b = ADD(a, C2)" we may optimize it into "b = ADD(x, C1 + C2)" and then might + * also remove "a". + * + * This implementation supports only a few optimizations of combinations from ir_fold.h + * + * TODO: Think about a more general solution ??? 
+ */ +static void ir_iter_add_related_uses(ir_ctx *ctx, ir_ref ref, ir_bitqueue *worklist) +{ + ir_insn *insn = &ctx->ir_base[ref]; + + if (insn->op == IR_ADD || insn->op == IR_SUB) { + ir_use_list *use_list = &ctx->use_lists[ref]; + + if (use_list->count == 1) { + ir_ref use = ctx->use_edges[use_list->refs]; + ir_insn *use_insn = &ctx->ir_base[use]; + + if (use_insn->op == IR_ADD || use_insn->op == IR_SUB) { + ir_bitqueue_add(worklist, use); + } + } + } +} + static void ir_iter_remove_insn(ir_ctx *ctx, ir_ref ref, ir_bitqueue *worklist) { ir_ref j, n, *p; @@ -1191,6 +1221,7 @@ void ir_iter_replace(ir_ctx *ctx, ir_ref ref, ir_ref new_ref, ir_bitqueue *workl ir_insn_set_op(insn, i, new_ref); /* schedule folding */ ir_bitqueue_add(worklist, use); + ir_iter_add_related_uses(ctx, use, worklist); } } else { for (j = 0; j < n; j++, p++) { @@ -1812,9 +1843,6 @@ static ir_ref ir_ext_ref(ir_ctx *ctx, ir_ref var_ref, ir_ref src_ref, ir_op op, } ref = ir_emit1(ctx, optx, src_ref); - ctx->use_lists = ir_mem_realloc(ctx->use_lists, ctx->insns_count * sizeof(ir_use_list)); - ctx->use_lists[ref].count = 0; - ctx->use_lists[ref].refs = IR_UNUSED; ir_use_list_add(ctx, ref, var_ref); if (!IR_IS_CONST_REF(src_ref)) { ir_use_list_replace_one(ctx, src_ref, var_ref, ref); @@ -1894,6 +1922,7 @@ static bool ir_try_promote_ext(ir_ctx *ctx, ir_ref ext_ref, ir_insn *insn, ir_bi } else { ctx->ir_base[use].op1 = ir_ext_ref(ctx, use, use_insn->op1, op, type, worklist); } + ir_bitqueue_add(worklist, use); } if (use_insn->op2 != ref) { if (IR_IS_CONST_REF(use_insn->op2) @@ -1902,6 +1931,7 @@ static bool ir_try_promote_ext(ir_ctx *ctx, ir_ref ext_ref, ir_insn *insn, ir_bi } else { ctx->ir_base[use].op2 = ir_ext_ref(ctx, use, use_insn->op2, op, type, worklist); } + ir_bitqueue_add(worklist, use); } } } @@ -2152,7 +2182,7 @@ static bool ir_optimize_phi(ir_ctx *ctx, ir_ref merge_ref, ir_insn *merge, ir_re ir_ref root_ref = start1->op1; ir_insn *root = &ctx->ir_base[root_ref]; - if (root->op == IR_IF 
&& ctx->use_lists[root->op2].count == 1) { + if (root->op == IR_IF && !IR_IS_CONST_REF(root->op2) && ctx->use_lists[root->op2].count == 1) { ir_ref cond_ref = root->op2; ir_insn *cond = &ctx->ir_base[cond_ref]; ir_type type = insn->type; @@ -2873,7 +2903,7 @@ static bool ir_try_split_if_cmp(ir_ctx *ctx, ir_ref ref, ir_insn *insn, ir_bitqu return 0; } -static void ir_optimize_merge(ir_ctx *ctx, ir_ref merge_ref, ir_insn *merge, ir_bitqueue *worklist) +static void ir_iter_optimize_merge(ir_ctx *ctx, ir_ref merge_ref, ir_insn *merge, ir_bitqueue *worklist) { ir_use_list *use_list = &ctx->use_lists[merge_ref]; @@ -2918,6 +2948,171 @@ static void ir_optimize_merge(ir_ctx *ctx, ir_ref merge_ref, ir_insn *merge, ir_ } } +static ir_ref ir_iter_optimize_condition(ir_ctx *ctx, ir_ref control, ir_ref condition, bool *swap) +{ + ir_insn *condition_insn = &ctx->ir_base[condition]; + + if (condition_insn->opt == IR_OPT(IR_NOT, IR_BOOL)) { + *swap = 1; + condition = condition_insn->op1; + condition_insn = &ctx->ir_base[condition]; + } + + if (condition_insn->op == IR_NE && IR_IS_CONST_REF(condition_insn->op2)) { + ir_insn *val_insn = &ctx->ir_base[condition_insn->op2]; + + if (IR_IS_TYPE_INT(val_insn->type) && val_insn->val.u64 == 0) { + condition = condition_insn->op1; + condition_insn = &ctx->ir_base[condition]; + } + } else if (condition_insn->op == IR_EQ && IR_IS_CONST_REF(condition_insn->op2)) { + ir_insn *val_insn = &ctx->ir_base[condition_insn->op2]; + + if (condition_insn->op2 == IR_TRUE) { + condition = condition_insn->op1; + condition_insn = &ctx->ir_base[condition]; + } else if (IR_IS_TYPE_INT(val_insn->type) && val_insn->val.u64 == 0) { + condition = condition_insn->op1; + condition_insn = &ctx->ir_base[condition]; + *swap = !*swap; + } + } + + while ((condition_insn->op == IR_BITCAST + || condition_insn->op == IR_ZEXT + || condition_insn->op == IR_SEXT) + && ctx->use_lists[condition].count == 1) { + condition = condition_insn->op1; + condition_insn = 
&ctx->ir_base[condition]; + } + + if (!IR_IS_CONST_REF(condition) && ctx->use_lists[condition].count > 1) { + condition = ir_check_dominating_predicates(ctx, control, condition); + } + + return condition; +} + +static void ir_iter_optimize_if(ir_ctx *ctx, ir_ref ref, ir_insn *insn, ir_bitqueue *worklist) +{ + bool swap = 0; + ir_ref condition = ir_iter_optimize_condition(ctx, insn->op1, insn->op2, &swap); + + if (swap) { + ir_use_list *use_list = &ctx->use_lists[ref]; + ir_ref *p, use; + + IR_ASSERT(use_list->count == 2); + p = ctx->use_edges + use_list->refs; + use = *p; + if (ctx->ir_base[use].op == IR_IF_TRUE) { + ctx->ir_base[use].op = IR_IF_FALSE; + use = *(p+1); + ctx->ir_base[use].op = IR_IF_TRUE; + } else { + ctx->ir_base[use].op = IR_IF_TRUE; + use = *(p+1); + ctx->ir_base[use].op = IR_IF_FALSE; + } + } + + if (IR_IS_CONST_REF(condition)) { + /* + * | | + * IF(TRUE) => END + * | \ | + * | +------+ | + * | IF_TRUE | BEGIN(unreachable) + * IF_FALSE | BEGIN + * | | + */ + ir_ref if_true_ref, if_false_ref; + ir_insn *if_true, *if_false; + + insn->optx = IR_OPTX(IR_END, IR_VOID, 1); + if (!IR_IS_CONST_REF(insn->op2)) { + ir_use_list_remove_one(ctx, insn->op2, ref); + } + insn->op2 = IR_UNUSED; + + ir_get_true_false_refs(ctx, ref, &if_true_ref, &if_false_ref); + if_true = &ctx->ir_base[if_true_ref]; + if_false = &ctx->ir_base[if_false_ref]; + if_true->op = IR_BEGIN; + if_false->op = IR_BEGIN; + if (ir_ref_is_true(ctx, condition)) { + if_false->op1 = IR_UNUSED; + ir_use_list_remove_one(ctx, ref, if_false_ref); + ir_bitqueue_add(worklist, if_true_ref); + } else { + if_true->op1 = IR_UNUSED; + ir_use_list_remove_one(ctx, ref, if_true_ref); + ir_bitqueue_add(worklist, if_false_ref); + } + ctx->flags2 &= ~IR_CFG_REACHABLE; + } else if (insn->op2 != condition) { + ir_iter_update_op(ctx, ref, 2, condition, worklist); + } +} + +static void ir_iter_optimize_guard(ir_ctx *ctx, ir_ref ref, ir_insn *insn, ir_bitqueue *worklist) +{ + bool swap; + ir_ref condition = 
ir_iter_optimize_condition(ctx, insn->op1, insn->op2, &swap); + + if (swap) { + if (insn->op == IR_GUARD) { + insn->op = IR_GUARD_NOT; + } else { + insn->op = IR_GUARD; + } + } + + if (IR_IS_CONST_REF(condition)) { + if (insn->op == IR_GUARD) { + if (ir_ref_is_true(ctx, condition)) { + ir_ref prev, next; + +remove_guard: + prev = insn->op1; + next = ir_next_control(ctx, ref); + ctx->ir_base[next].op1 = prev; + ir_use_list_remove_one(ctx, ref, next); + ir_use_list_replace_one(ctx, prev, ref, next); + insn->op1 = IR_UNUSED; + + if (!IR_IS_CONST_REF(insn->op2)) { + ir_use_list_remove_one(ctx, insn->op2, ref); + if (ir_is_dead(ctx, insn->op2)) { + /* schedule DCE */ + ir_bitqueue_add(worklist, insn->op2); + } + } + + if (insn->op3) { + /* SNAPSHOT */ + ir_iter_remove_insn(ctx, insn->op3, worklist); + } + + MAKE_NOP(insn); + return; + } else { + condition = IR_FALSE; + } + } else { + if (ir_ref_is_true(ctx, condition)) { + condition = IR_TRUE; + } else { + goto remove_guard; + } + } + } + + if (insn->op2 != condition) { + ir_iter_update_op(ctx, ref, 2, condition, worklist); + } +} + void ir_iter_opt(ir_ctx *ctx, ir_bitqueue *worklist) { ir_ref i, val; @@ -2993,7 +3188,7 @@ folding: ir_merge_blocks(ctx, insn->op1, i, worklist); } } else if (insn->op == IR_MERGE) { - ir_optimize_merge(ctx, i, insn, worklist); + ir_iter_optimize_merge(ctx, i, insn, worklist); } } else if (ir_is_dead_load(ctx, i)) { ir_ref next; @@ -3081,20 +3276,10 @@ remove_bitcast: } else { goto remove_bitcast; } - } else if (insn->op == IR_IF || insn->op == IR_GUARD || insn->op == IR_GUARD_NOT) { - ir_insn *condition_insn = &ctx->ir_base[insn->op2]; - - if (condition_insn->op == IR_BITCAST || condition_insn->op == IR_ZEXT || condition_insn->op == IR_SEXT) { - ir_iter_update_op(ctx, i, 2, condition_insn->op1, worklist); - condition_insn = &ctx->ir_base[condition_insn->op1]; - } - if (condition_insn->op == IR_NE && IR_IS_CONST_REF(condition_insn->op2)) { - ir_insn *val_insn = 
&ctx->ir_base[condition_insn->op2]; - - if (IR_IS_TYPE_INT(val_insn->type) && val_insn->val.u64 == 0) { - ir_iter_update_op(ctx, i, 2, condition_insn->op1, worklist); - } - } + } else if (insn->op == IR_IF) { + ir_iter_optimize_if(ctx, i, insn, worklist); + } else if (insn->op == IR_GUARD || insn->op == IR_GUARD_NOT) { + ir_iter_optimize_guard(ctx, i, insn, worklist); } } } diff --git a/ext/opcache/jit/ir/ir_x86.dasc b/ext/opcache/jit/ir/ir_x86.dasc index dce15b5be3b..c4f0eae01c0 100644 --- a/ext/opcache/jit/ir/ir_x86.dasc +++ b/ext/opcache/jit/ir/ir_x86.dasc @@ -6414,16 +6414,23 @@ static void ir_emit_cond_cmp_int(ir_ctx *ctx, ir_ref def, ir_insn *insn) ir_reg op3_reg = ctx->regs[def][3]; ir_op op; - if (op2_reg != IR_REG_NONE && IR_REG_SPILLED(op2_reg)) { + if (op2 != op3) { + if (op2_reg != IR_REG_NONE && IR_REG_SPILLED(op2_reg)) { + op2_reg = IR_REG_NUM(op2_reg); + ir_emit_load(ctx, type, op2_reg, op2); + } + if (op3_reg != IR_REG_NONE && IR_REG_SPILLED(op3_reg)) { + op3_reg = IR_REG_NUM(op3_reg); + ir_emit_load(ctx, type, op3_reg, op3); + } + } else if (op2_reg != IR_REG_NONE && IR_REG_SPILLED(op2_reg)) { op2_reg = IR_REG_NUM(op2_reg); ir_emit_load(ctx, type, op2_reg, op2); - if (op3 == op2) { - op3_reg = op2_reg; - } - } - if (op3_reg != IR_REG_NONE && op3 != op2 && IR_REG_SPILLED(op3_reg)) { + op3_reg = op2_reg; + } else if (op3_reg != IR_REG_NONE && IR_REG_SPILLED(op3_reg)) { op3_reg = IR_REG_NUM(op3_reg); ir_emit_load(ctx, type, op3_reg, op3); + op2_reg = op3_reg; } ir_emit_cmp_int_common2(ctx, def, insn->op1, &ctx->ir_base[insn->op1]); @@ -6578,16 +6585,23 @@ static void ir_emit_cond_cmp_fp(ir_ctx *ctx, ir_ref def, ir_insn *insn) ir_reg op3_reg = ctx->regs[def][3]; ir_op op; - if (op2_reg != IR_REG_NONE && IR_REG_SPILLED(op2_reg)) { + if (op2 != op3) { + if (op2_reg != IR_REG_NONE && IR_REG_SPILLED(op2_reg)) { + op2_reg = IR_REG_NUM(op2_reg); + ir_emit_load(ctx, type, op2_reg, op2); + } + if (op3_reg != IR_REG_NONE && IR_REG_SPILLED(op3_reg)) { + op3_reg 
= IR_REG_NUM(op3_reg); + ir_emit_load(ctx, type, op3_reg, op3); + } + } else if (op2_reg != IR_REG_NONE && IR_REG_SPILLED(op2_reg)) { op2_reg = IR_REG_NUM(op2_reg); ir_emit_load(ctx, type, op2_reg, op2); - if (op3 == op2) { - op3_reg = op2_reg; - } - } - if (op3_reg != IR_REG_NONE && op3 != op2 && IR_REG_SPILLED(op3_reg)) { + op3_reg = op2_reg; + } else if (op3_reg != IR_REG_NONE && IR_REG_SPILLED(op3_reg)) { op3_reg = IR_REG_NUM(op3_reg); ir_emit_load(ctx, type, op3_reg, op3); + op2_reg = op3_reg; } op = ir_emit_cmp_fp_common(ctx, def, insn->op1, &ctx->ir_base[insn->op1]);