1
0
mirror of https://github.com/php/php-src.git synced 2026-03-24 00:02:20 +01:00

Update IR

IR commit: 84df6f8d409c7d06daa68d96a25d0aed81dcbf4a
This commit is contained in:
Dmitry Stogov
2024-03-18 21:47:06 +03:00
parent 9bed0b5f2f
commit 968a807d97
7 changed files with 230 additions and 245 deletions

View File

@@ -238,7 +238,7 @@ static int parse_rule(const char *buf)
return mask;
}
int main()
int main(int argc, char **argv)
{
char buf[4096];
FILE *f = stdin;

View File

@@ -382,7 +382,7 @@ void ir_init(ir_ctx *ctx, uint32_t flags, ir_ref consts_limit, ir_ref insns_limi
buf = ir_mem_malloc((consts_limit + insns_limit) * sizeof(ir_insn));
ctx->ir_base = buf + consts_limit;
ctx->ir_base[IR_UNUSED].optx = IR_NOP;
MAKE_NOP(&ctx->ir_base[IR_UNUSED]);
ctx->ir_base[IR_NULL].optx = IR_OPT(IR_C_ADDR, IR_ADDR);
ctx->ir_base[IR_NULL].val.u64 = 0;
ctx->ir_base[IR_FALSE].optx = IR_OPT(IR_C_BOOL, IR_BOOL);
@@ -1296,7 +1296,7 @@ void ir_use_list_remove_one(ir_ctx *ctx, ir_ref from, ir_ref ref)
}
}
void ir_use_list_replace(ir_ctx *ctx, ir_ref ref, ir_ref use, ir_ref new_use)
void ir_use_list_replace_one(ir_ctx *ctx, ir_ref ref, ir_ref use, ir_ref new_use)
{
ir_use_list *use_list = &ctx->use_lists[ref];
ir_ref i, n, *p;
@@ -1310,6 +1310,19 @@ void ir_use_list_replace(ir_ctx *ctx, ir_ref ref, ir_ref use, ir_ref new_use)
}
}
/* Rewrite the use-list of "ref": every edge equal to "use" is replaced
 * by "new_use". Unlike the *_one variant, all matches are updated. */
void ir_use_list_replace_all(ir_ctx *ctx, ir_ref ref, ir_ref use, ir_ref new_use)
{
	ir_use_list *use_list = &ctx->use_lists[ref];
	ir_ref *edge = &ctx->use_edges[use_list->refs];
	ir_ref *end = edge + use_list->count;

	/* Walk the edge slice with a pointer pair instead of an index counter. */
	while (edge < end) {
		if (*edge == use) {
			*edge = new_use;
		}
		edge++;
	}
}
bool ir_use_list_add(ir_ctx *ctx, ir_ref to, ir_ref ref)
{
ir_use_list *use_list = &ctx->use_lists[to];
@@ -2679,10 +2692,7 @@ void _ir_STORE(ir_ctx *ctx, ir_ref addr, ir_ref val)
} else {
ctx->control = insn->op1;
}
insn->optx = IR_NOP;
insn->op1 = IR_NOP;
insn->op2 = IR_NOP;
insn->op3 = IR_NOP;
MAKE_NOP(insn);
}
break;
}

View File

@@ -8,30 +8,6 @@
#include "ir.h"
#include "ir_private.h"
/* Turn the instruction at *_insn into a NOP: opcode/optx cleared and all
 * three operand slots set to IR_UNUSED. do/while(0) keeps the multi-statement
 * macro safe inside unbraced if/else bodies. */
#define MAKE_NOP(_insn) do { \
ir_insn *__insn = _insn; \
__insn->optx = IR_NOP; \
__insn->op1 = __insn->op2 = __insn->op3 = IR_UNUSED; \
} while (0)
/* Empty the use-list of _ref: both the edge count and the start offset into
 * ctx->use_edges are reset. NOTE: relies on a variable named "ctx" being in
 * scope at the expansion site. */
#define CLEAR_USES(_ref) do { \
ir_use_list *__use_list = &ctx->use_lists[_ref]; \
__use_list->count = 0; \
__use_list->refs = 0; \
} while (0)
/* Swap two ir_ref lvalues in place. Arguments are evaluated more than once,
 * so do not pass expressions with side effects. */
#define SWAP_REFS(_ref1, _ref2) do { \
ir_ref _tmp = _ref1; \
_ref1 = _ref2; \
_ref2 = _tmp; \
} while (0)
/* Swap two ir_insn* lvalues in place (same multiple-evaluation caveat). */
#define SWAP_INSNS(_insn1, _insn2) do { \
ir_insn *_tmp = _insn1; \
_insn1 = _insn2; \
_insn2 = _tmp; \
} while (0)
static void ir_get_true_false_refs(const ir_ctx *ctx, ir_ref if_ref, ir_ref *if_true_ref, ir_ref *if_false_ref)
{
ir_use_list *use_list = &ctx->use_lists[if_ref];
@@ -54,7 +30,6 @@ static ir_ref _ir_merge_blocks(ir_ctx *ctx, ir_ref end, ir_ref begin)
{
ir_ref prev, next;
ir_use_list *use_list;
ir_ref n, *p;
IR_ASSERT(ctx->ir_base[begin].op == IR_BEGIN);
IR_ASSERT(ctx->ir_base[end].op == IR_END);
@@ -68,23 +43,12 @@ static ir_ref _ir_merge_blocks(ir_ctx *ctx, ir_ref end, ir_ref begin)
next = ctx->use_edges[use_list->refs];
/* remove BEGIN and END */
ctx->ir_base[begin].op = IR_NOP;
ctx->ir_base[begin].op1 = IR_UNUSED;
ctx->use_lists[begin].count = 0;
ctx->ir_base[end].op = IR_NOP;
ctx->ir_base[end].op1 = IR_UNUSED;
ctx->use_lists[end].count = 0;
MAKE_NOP(&ctx->ir_base[begin]); CLEAR_USES(begin);
MAKE_NOP(&ctx->ir_base[end]); CLEAR_USES(end);
/* connect their predecessor and successor */
ctx->ir_base[next].op1 = prev;
use_list = &ctx->use_lists[prev];
n = use_list->count;
for (p = &ctx->use_edges[use_list->refs]; n > 0; p++, n--) {
if (*p == end) {
*p = next;
}
}
ir_use_list_replace_all(ctx, prev, end, next);
return next;
}
@@ -141,7 +105,7 @@ static ir_ref ir_try_remove_empty_diamond(ir_ctx *ctx, ir_ref ref, ir_insn *insn
IR_ASSERT(ctx->use_lists[start2_ref].count == 1);
next->op1 = root->op1;
ir_use_list_replace(ctx, root->op1, root_ref, next_ref);
ir_use_list_replace_one(ctx, root->op1, root_ref, next_ref);
if (!IR_IS_CONST_REF(root->op2)) {
ir_use_list_remove_all(ctx, root->op2, root_ref);
}
@@ -189,7 +153,7 @@ static ir_ref ir_try_remove_empty_diamond(ir_ctx *ctx, ir_ref ref, ir_insn *insn
ir_insn *root = &ctx->ir_base[root_ref];
next->op1 = root->op1;
ir_use_list_replace(ctx, root->op1, root_ref, next_ref);
ir_use_list_replace_one(ctx, root->op1, root_ref, next_ref);
ir_use_list_remove_all(ctx, root->op2, root_ref);
MAKE_NOP(root); CLEAR_USES(root_ref);
@@ -303,8 +267,7 @@ static ir_ref ir_optimize_phi(ir_ctx *ctx, ir_ref merge_ref, ir_insn *merge, ir_
insn->op3 = IR_UNUSED;
next->op1 = root->op1;
ir_use_list_replace(ctx, root->op1, root_ref, next_ref);
ir_use_list_remove_all(ctx, root->op2, root_ref);
ir_use_list_replace_one(ctx, root->op1, root_ref, next_ref);
if (!IR_IS_CONST_REF(insn->op1)) {
ir_use_list_remove_all(ctx, insn->op1, cond_ref);
}
@@ -384,8 +347,8 @@ static ir_ref ir_optimize_phi(ir_ctx *ctx, ir_ref merge_ref, ir_insn *merge, ir_
insn->op3 = IR_UNUSED;
next->op1 = root->op1;
ir_use_list_replace(ctx, root->op1, root_ref, next_ref);
ir_use_list_remove_all(ctx, root->op2, root_ref);
ir_use_list_replace_one(ctx, root->op1, root_ref, next_ref);
ir_use_list_remove_one(ctx, insn->op1, neg_ref);
if (!IR_IS_CONST_REF(insn->op1)) {
ir_use_list_remove_all(ctx, insn->op1, cond_ref);
}
@@ -440,8 +403,8 @@ static ir_ref ir_optimize_phi(ir_ctx *ctx, ir_ref merge_ref, ir_insn *merge, ir_
}
next->op1 = root->op1;
ir_use_list_replace(ctx, cond_ref, root_ref, ref);
ir_use_list_replace(ctx, root->op1, root_ref, next_ref);
ir_use_list_replace_one(ctx, cond_ref, root_ref, ref);
ir_use_list_replace_one(ctx, root->op1, root_ref, next_ref);
ir_use_list_remove_all(ctx, root->op2, root_ref);
MAKE_NOP(root); CLEAR_USES(root_ref);
@@ -619,9 +582,9 @@ static ir_ref ir_try_split_if(ir_ctx *ctx, ir_ref ref, ir_insn *insn)
ir_use_list_remove_all(ctx, merge_ref, cond_ref);
ir_use_list_remove_all(ctx, ref, if_true_ref);
if (!IR_IS_CONST_REF(cond->op3)) {
ir_use_list_replace(ctx, cond->op3, cond_ref, end2_ref);
ir_use_list_replace_one(ctx, cond->op3, cond_ref, end2_ref);
}
ir_use_list_replace(ctx, end1_ref, merge_ref, if_false_ref);
ir_use_list_replace_one(ctx, end1_ref, merge_ref, if_false_ref);
ir_use_list_add(ctx, end2_ref, if_true_ref);
end2->optx = IR_OPTX(IR_IF, IR_VOID, 2);
@@ -722,8 +685,8 @@ static ir_ref ir_try_split_if_cmp(ir_ctx *ctx, ir_worklist *worklist, ir_ref ref
* | |
*/
ir_use_list_replace(ctx, end1_ref, merge_ref, if_false_ref);
ir_use_list_replace(ctx, end2_ref, merge_ref, if_true_ref);
ir_use_list_replace_one(ctx, end1_ref, merge_ref, if_false_ref);
ir_use_list_replace_one(ctx, end2_ref, merge_ref, if_true_ref);
MAKE_NOP(merge); CLEAR_USES(merge_ref);
MAKE_NOP(phi); CLEAR_USES(phi_ref);
@@ -762,8 +725,8 @@ static ir_ref ir_try_split_if_cmp(ir_ctx *ctx, ir_worklist *worklist, ir_ref ref
* | |
*/
ir_use_list_replace(ctx, end1_ref, merge_ref, if_false_ref);
ir_use_list_replace(ctx, end2_ref, merge_ref, if_false_ref);
ir_use_list_replace_one(ctx, end1_ref, merge_ref, if_false_ref);
ir_use_list_replace_one(ctx, end2_ref, merge_ref, if_false_ref);
MAKE_NOP(merge); CLEAR_USES(merge_ref);
MAKE_NOP(phi); CLEAR_USES(phi_ref);
@@ -809,10 +772,10 @@ static ir_ref ir_try_split_if_cmp(ir_ctx *ctx, ir_worklist *worklist, ir_ref ref
ir_use_list_remove_all(ctx, merge_ref, phi_ref);
ir_use_list_remove_all(ctx, ref, if_true_ref);
if (!IR_IS_CONST_REF(phi->op3)) {
ir_use_list_replace(ctx, phi->op3, phi_ref, insn->op2);
ir_use_list_replace_one(ctx, phi->op3, phi_ref, insn->op2);
}
ir_use_list_replace(ctx, end1_ref, merge_ref, if_false_ref);
ir_use_list_replace(ctx, cond_ref, ref, end2_ref);
ir_use_list_replace_one(ctx, end1_ref, merge_ref, if_false_ref);
ir_use_list_replace_one(ctx, cond_ref, ref, end2_ref);
ir_use_list_add(ctx, end2_ref, if_true_ref);
end2->optx = IR_OPTX(IR_IF, IR_VOID, 2);

View File

@@ -125,9 +125,9 @@ bool ir_check(const ir_ctx *ctx)
ok = 0;
}
}
if (use >= i
&& !(insn->op == IR_PHI
&& (!(ctx->flags2 & IR_LINEAR) || ctx->ir_base[insn->op1].op == IR_LOOP_BEGIN))) {
if ((ctx->flags2 & IR_LINEAR)
&& use >= i
&& !(insn->op == IR_PHI && ctx->ir_base[insn->op1].op == IR_LOOP_BEGIN)) {
fprintf(stderr, "ir_base[%d].ops[%d] invalid forward reference (%d)\n", i, j, use);
ok = 0;
}
@@ -216,7 +216,8 @@ bool ir_check(const ir_ctx *ctx)
}
break;
case IR_OPND_CONTROL_DEP:
if (use >= i
if ((ctx->flags2 & IR_LINEAR)
&& use >= i
&& !(insn->op == IR_LOOP_BEGIN)) {
fprintf(stderr, "ir_base[%d].ops[%d] invalid forward reference (%d)\n", i, j, use);
ok = 0;

View File

@@ -11,12 +11,15 @@
#include "ir.h"
#include "ir_private.h"
static int32_t ir_gcm_schedule_early(ir_ctx *ctx, int32_t *_blocks, ir_ref ref, ir_list *queue_rest)
#define IR_GCM_IS_SCHEDULED_EARLY(b) (((int32_t)(b)) < 0)
#define IR_GCM_EARLY_BLOCK(b) ((uint32_t)-((int32_t)(b)))
static uint32_t ir_gcm_schedule_early(ir_ctx *ctx, ir_ref ref, ir_list *queue_rest)
{
ir_ref n, *p, input;
ir_insn *insn;
uint32_t dom_depth;
int32_t b, result;
uint32_t b, result;
bool reschedule_late = 1;
insn = &ctx->ir_base[ref];
@@ -31,11 +34,11 @@ static int32_t ir_gcm_schedule_early(ir_ctx *ctx, int32_t *_blocks, ir_ref ref,
for (p = insn->ops + 1; n > 0; p++, n--) {
input = *p;
if (input > 0) {
b = _blocks[input];
if (b == 0) {
b = ir_gcm_schedule_early(ctx, _blocks, input, queue_rest);
} else if (b < 0) {
b = -b;
b = ctx->cfg_map[input];
if (IR_GCM_IS_SCHEDULED_EARLY(b)) {
b = IR_GCM_EARLY_BLOCK(b);
} else if (!b) {
b = ir_gcm_schedule_early(ctx, input, queue_rest);
}
if (dom_depth < ctx->cfg_blocks[b].dom_depth) {
dom_depth = ctx->cfg_blocks[b].dom_depth;
@@ -44,8 +47,8 @@ static int32_t ir_gcm_schedule_early(ir_ctx *ctx, int32_t *_blocks, ir_ref ref,
reschedule_late = 0;
}
}
_blocks[ref] = -result;
ctx->cfg_map[ref] = IR_GCM_EARLY_BLOCK(result);
if (UNEXPECTED(reschedule_late)) {
/* Floating nodes that don't depend on other nodes
* (e.g. only on constants), have to be scheduled to the
@@ -58,7 +61,7 @@ static int32_t ir_gcm_schedule_early(ir_ctx *ctx, int32_t *_blocks, ir_ref ref,
}
/* Lowest Common Ancestor (LCA) in the dominator tree */
static int32_t ir_gcm_find_lca(ir_ctx *ctx, int32_t b1, int32_t b2)
static uint32_t ir_gcm_find_lca(ir_ctx *ctx, uint32_t b1, uint32_t b2)
{
uint32_t dom_depth;
@@ -77,154 +80,147 @@ static int32_t ir_gcm_find_lca(ir_ctx *ctx, int32_t b1, int32_t b2)
return b2;
}
static void ir_gcm_schedule_late(ir_ctx *ctx, int32_t *_blocks, ir_ref ref)
static void ir_gcm_schedule_late(ir_ctx *ctx, ir_ref ref, uint32_t b)
{
ir_ref n, *p, use;
ir_insn *insn;
ir_use_list *use_list;
uint32_t lca = 0;
IR_ASSERT(_blocks[ref] < 0);
_blocks[ref] = -_blocks[ref];
IR_ASSERT(ctx->ir_base[ref].op != IR_PARAM && ctx->ir_base[ref].op != IR_VAR);
IR_ASSERT(ctx->ir_base[ref].op != IR_PHI && ctx->ir_base[ref].op != IR_PI);
IR_ASSERT(IR_GCM_IS_SCHEDULED_EARLY(b));
b = IR_GCM_EARLY_BLOCK(b);
ctx->cfg_map[ref] = b;
use_list = &ctx->use_lists[ref];
n = use_list->count;
if (n) {
int32_t lca, b;
insn = &ctx->ir_base[ref];
IR_ASSERT(insn->op != IR_PARAM && insn->op != IR_VAR);
IR_ASSERT(insn->op != IR_PHI && insn->op != IR_PI);
for (p = &ctx->use_edges[use_list->refs]; n > 0; p++, n--) {
use = *p;
b = ctx->cfg_map[use];
if (IR_GCM_IS_SCHEDULED_EARLY(b)) {
ir_gcm_schedule_late(ctx, use, b);
b = ctx->cfg_map[use];
IR_ASSERT(b != 0);
} else if (!b) {
continue;
} else if (ctx->ir_base[use].op == IR_PHI) {
ir_insn *insn = &ctx->ir_base[use];
ir_ref *p = insn->ops + 2; /* PHI data inputs */
ir_ref *q = ctx->ir_base[insn->op1].ops + 1; /* MERGE inputs */
ir_ref n = insn->inputs_count - 1;
lca = 0;
for (p = &ctx->use_edges[use_list->refs]; n > 0; p++, n--) {
use = *p;
b = _blocks[use];
if (!b) {
continue;
} else if (b < 0) {
ir_gcm_schedule_late(ctx, _blocks, use);
b = _blocks[use];
IR_ASSERT(b != 0);
}
insn = &ctx->ir_base[use];
if (insn->op == IR_PHI) {
ir_ref *p = insn->ops + 2; /* PHI data inputs */
ir_ref *q = ctx->ir_base[insn->op1].ops + 1; /* MERGE inputs */
ir_ref n = insn->inputs_count - 1;
for (;n > 0; p++, q++, n--) {
if (*p == ref) {
b = _blocks[*q];
lca = !lca ? b : ir_gcm_find_lca(ctx, lca, b);
}
for (;n > 0; p++, q++, n--) {
if (*p == ref) {
b = ctx->cfg_map[*q];
lca = !lca ? b : ir_gcm_find_lca(ctx, lca, b);
}
}
continue;
}
lca = !lca ? b : ir_gcm_find_lca(ctx, lca, b);
}
IR_ASSERT(lca != 0 && "No Common Ancestor");
b = lca;
if (b != ctx->cfg_map[ref]) {
ir_block *bb = &ctx->cfg_blocks[b];
uint32_t loop_depth = bb->loop_depth;
if (loop_depth) {
uint32_t flags;
use_list = &ctx->use_lists[ref];
if (use_list->count == 1) {
use = ctx->use_edges[use_list->refs];
ir_insn *insn = &ctx->ir_base[use];
if (insn->op == IR_IF || insn->op == IR_GUARD || insn->op == IR_GUARD_NOT) {
ctx->cfg_map[ref] = b;
return;
}
}
flags = (bb->flags & IR_BB_LOOP_HEADER) ? bb->flags : ctx->cfg_blocks[bb->loop_header].flags;
if ((flags & IR_BB_LOOP_WITH_ENTRY)
&& !(ctx->binding && ir_binding_find(ctx, ref))) {
/* Don't move loop invariant code across an OSR ENTRY if we can't restore it */
} else {
lca = !lca ? b : ir_gcm_find_lca(ctx, lca, b);
do {
lca = bb->dom_parent;
bb = &ctx->cfg_blocks[lca];
if (bb->loop_depth < loop_depth) {
if (!bb->loop_depth) {
b = lca;
break;
}
flags = (bb->flags & IR_BB_LOOP_HEADER) ? bb->flags : ctx->cfg_blocks[bb->loop_header].flags;
if ((flags & IR_BB_LOOP_WITH_ENTRY)
&& !(ctx->binding && ir_binding_find(ctx, ref))) {
break;
}
loop_depth = bb->loop_depth;
b = lca;
}
} while (lca != ctx->cfg_map[ref]);
}
}
IR_ASSERT(lca != 0 && "No Common Ancestor");
b = lca;
if (b != _blocks[ref]) {
ir_block *bb = &ctx->cfg_blocks[b];
uint32_t loop_depth = bb->loop_depth;
if (loop_depth) {
uint32_t flags;
use_list = &ctx->use_lists[ref];
if (use_list->count == 1) {
use = ctx->use_edges[use_list->refs];
insn = &ctx->ir_base[use];
if (insn->op == IR_IF || insn->op == IR_GUARD || insn->op == IR_GUARD_NOT) {
_blocks[ref] = b;
return;
}
}
flags = (bb->flags & IR_BB_LOOP_HEADER) ? bb->flags : ctx->cfg_blocks[bb->loop_header].flags;
if ((flags & IR_BB_LOOP_WITH_ENTRY)
&& !(ctx->binding && ir_binding_find(ctx, ref))) {
/* Don't move loop invariant code across an OSR ENTRY if we can't restore it */
} else {
do {
lca = bb->dom_parent;
bb = &ctx->cfg_blocks[lca];
if (bb->loop_depth < loop_depth) {
if (!bb->loop_depth) {
b = lca;
break;
}
flags = (bb->flags & IR_BB_LOOP_HEADER) ? bb->flags : ctx->cfg_blocks[bb->loop_header].flags;
if ((flags & IR_BB_LOOP_WITH_ENTRY)
&& !(ctx->binding && ir_binding_find(ctx, ref))) {
break;
}
loop_depth = bb->loop_depth;
b = lca;
}
} while (lca != _blocks[ref]);
}
}
_blocks[ref] = b;
if (ctx->ir_base[ref + 1].op == IR_OVERFLOW) {
/* OVERFLOW is a projection and must be scheduled together with previous ADD/SUB/MUL_OV */
_blocks[ref + 1] = b;
}
ctx->cfg_map[ref] = b;
if (ctx->ir_base[ref + 1].op == IR_OVERFLOW) {
/* OVERFLOW is a projection and must be scheduled together with previous ADD/SUB/MUL_OV */
ctx->cfg_map[ref + 1] = b;
}
}
}
static void ir_gcm_schedule_rest(ir_ctx *ctx, int32_t *_blocks, ir_ref ref)
static void ir_gcm_schedule_rest(ir_ctx *ctx, ir_ref ref)
{
ir_ref n, *p, use;
ir_insn *insn;
uint32_t b = ctx->cfg_map[ref];
uint32_t lca = 0;
IR_ASSERT(_blocks[ref] < 0);
_blocks[ref] = -_blocks[ref];
IR_ASSERT(ctx->ir_base[ref].op != IR_PARAM && ctx->ir_base[ref].op != IR_VAR);
IR_ASSERT(ctx->ir_base[ref].op != IR_PHI && ctx->ir_base[ref].op != IR_PI);
IR_ASSERT(IR_GCM_IS_SCHEDULED_EARLY(b));
b = IR_GCM_EARLY_BLOCK(b);
ctx->cfg_map[ref] = b;
n = ctx->use_lists[ref].count;
if (n) {
uint32_t lca;
int32_t b;
insn = &ctx->ir_base[ref];
IR_ASSERT(insn->op != IR_PARAM && insn->op != IR_VAR);
IR_ASSERT(insn->op != IR_PHI && insn->op != IR_PI);
for (p = &ctx->use_edges[ctx->use_lists[ref].refs]; n > 0; p++, n--) {
use = *p;
b = ctx->cfg_map[use];
if (IR_GCM_IS_SCHEDULED_EARLY(b)) {
ir_gcm_schedule_late(ctx, use, b);
b = ctx->cfg_map[use];
IR_ASSERT(b != 0);
} else if (!b) {
continue;
} else if (ctx->ir_base[use].op == IR_PHI) {
ir_insn *insn = &ctx->ir_base[use];
ir_ref *p = insn->ops + 2; /* PHI data inputs */
ir_ref *q = ctx->ir_base[insn->op1].ops + 1; /* MERGE inputs */
ir_ref n = insn->inputs_count - 1;
lca = 0;
for (p = &ctx->use_edges[ctx->use_lists[ref].refs]; n > 0; p++, n--) {
use = *p;
b = _blocks[use];
if (!b) {
continue;
} else if (b < 0) {
ir_gcm_schedule_late(ctx, _blocks, use);
b = _blocks[use];
IR_ASSERT(b != 0);
}
insn = &ctx->ir_base[use];
if (insn->op == IR_PHI) {
ir_ref *p = insn->ops + 2; /* PHI data inputs */
ir_ref *q = ctx->ir_base[insn->op1].ops + 1; /* MERGE inputs */
ir_ref n = insn->inputs_count - 1;
for (;n > 0; p++, q++, n--) {
if (*p == ref) {
b = _blocks[*q];
lca = !lca ? b : ir_gcm_find_lca(ctx, lca, b);
}
for (;n > 0; p++, q++, n--) {
if (*p == ref) {
b = ctx->cfg_map[*q];
lca = !lca ? b : ir_gcm_find_lca(ctx, lca, b);
}
} else {
lca = !lca ? b : ir_gcm_find_lca(ctx, lca, b);
}
continue;
}
IR_ASSERT(lca != 0 && "No Common Ancestor");
b = lca;
_blocks[ref] = b;
if (ctx->ir_base[ref + 1].op == IR_OVERFLOW) {
/* OVERFLOW is a projection and must be scheduled together with previous ADD/SUB/MUL_OV */
_blocks[ref + 1] = b;
}
lca = !lca ? b : ir_gcm_find_lca(ctx, lca, b);
}
IR_ASSERT(lca != 0 && "No Common Ancestor");
b = lca;
ctx->cfg_map[ref] = b;
if (ctx->ir_base[ref + 1].op == IR_OVERFLOW) {
/* OVERFLOW is a projection and must be scheduled together with previous ADD/SUB/MUL_OV */
ctx->cfg_map[ref + 1] = b;
}
}
@@ -235,12 +231,12 @@ int ir_gcm(ir_ctx *ctx)
ir_list queue_early;
ir_list queue_late;
ir_list queue_rest;
int32_t *_blocks, b;
uint32_t *_blocks, b;
ir_insn *insn, *use_insn;
ir_use_list *use_list;
IR_ASSERT(ctx->cfg_map);
_blocks = (int32_t*)ctx->cfg_map;
_blocks = ctx->cfg_map;
ir_list_init(&queue_early, ctx->insns_count);
@@ -363,7 +359,7 @@ int ir_gcm(ir_ctx *ctx)
for (p = insn->ops + 2; k > 0; p++, k--) {
ref = *p;
if (ref > 0 && _blocks[ref] == 0) {
ir_gcm_schedule_early(ctx, _blocks, ref, &queue_rest);
ir_gcm_schedule_early(ctx, ref, &queue_rest);
}
}
}
@@ -372,7 +368,7 @@ int ir_gcm(ir_ctx *ctx)
if (ctx->flags & IR_DEBUG_GCM) {
fprintf(stderr, "GCM Schedule Early\n");
for (n = 1; n < ctx->insns_count; n++) {
fprintf(stderr, "%d -> %d\n", n, _blocks[n]);
fprintf(stderr, "%d -> %d\n", n, ctx->cfg_map[n]);
}
}
#endif
@@ -385,8 +381,9 @@ int ir_gcm(ir_ctx *ctx)
k = use_list->count;
for (p = &ctx->use_edges[use_list->refs]; k > 0; p++, k--) {
ref = *p;
if (_blocks[ref] < 0) {
ir_gcm_schedule_late(ctx, _blocks, ref);
b = _blocks[ref];
if (IR_GCM_IS_SCHEDULED_EARLY(b)) {
ir_gcm_schedule_late(ctx, ref, b);
}
}
}
@@ -395,7 +392,7 @@ int ir_gcm(ir_ctx *ctx)
while (n > 0) {
n--;
ref = ir_list_at(&queue_rest, n);
ir_gcm_schedule_rest(ctx, _blocks, ref);
ir_gcm_schedule_rest(ctx, ref);
}
ir_list_free(&queue_early);
@@ -406,7 +403,7 @@ int ir_gcm(ir_ctx *ctx)
if (ctx->flags & IR_DEBUG_GCM) {
fprintf(stderr, "GCM Schedule Late\n");
for (n = 1; n < ctx->insns_count; n++) {
fprintf(stderr, "%d -> %d\n", n, _blocks[n]);
fprintf(stderr, "%d -> %d\n", n, ctx->cfg_map[n]);
}
}
#endif

View File

@@ -923,9 +923,34 @@ struct _ir_use_list {
void ir_use_list_remove_all(ir_ctx *ctx, ir_ref from, ir_ref use);
void ir_use_list_remove_one(ir_ctx *ctx, ir_ref from, ir_ref use);
void ir_use_list_replace(ir_ctx *ctx, ir_ref ref, ir_ref use, ir_ref new_use);
void ir_use_list_replace_all(ir_ctx *ctx, ir_ref ref, ir_ref use, ir_ref new_use);
void ir_use_list_replace_one(ir_ctx *ctx, ir_ref ref, ir_ref use, ir_ref new_use);
bool ir_use_list_add(ir_ctx *ctx, ir_ref to, ir_ref new_use);
/*** Modification helpers ***/
/* Turn the instruction at *_insn into a NOP: opcode/optx cleared and all
 * three operand slots set to IR_UNUSED. do/while(0) makes the multi-statement
 * macro safe inside unbraced if/else bodies. */
#define MAKE_NOP(_insn) do { \
ir_insn *__insn = _insn; \
__insn->optx = IR_NOP; \
__insn->op1 = __insn->op2 = __insn->op3 = IR_UNUSED; \
} while (0)
/* Empty the use-list of _ref by zeroing its edge count. Only "count" is
 * reset here; the "refs" start offset is left as-is — presumably a stale
 * offset is harmless once count is 0 (TODO confirm against callers).
 * Relies on a variable named "ctx" being in scope at the expansion site. */
#define CLEAR_USES(_ref) do { \
ir_use_list *__use_list = &ctx->use_lists[_ref]; \
__use_list->count = 0; \
} while (0)
/* Swap two ir_ref lvalues in place. Arguments are evaluated more than once,
 * so do not pass expressions with side effects. */
#define SWAP_REFS(_ref1, _ref2) do { \
ir_ref _tmp = _ref1; \
_ref1 = _ref2; \
_ref2 = _tmp; \
} while (0)
/* Swap two ir_insn* lvalues in place (same multiple-evaluation caveat). */
#define SWAP_INSNS(_insn1, _insn2) do { \
ir_insn *_tmp = _insn1; \
_insn1 = _insn2; \
_insn2 = _tmp; \
} while (0)
/*** IR Basic Blocks info ***/
#define IR_IS_BB_START(op) \
((ir_op_flags[op] & IR_OP_FLAG_BB_START) != 0)

View File

@@ -269,12 +269,9 @@ static bool ir_sccp_is_equal(ir_ctx *ctx, ir_insn *_values, ir_ref a, ir_ref b)
static void ir_sccp_make_nop(ir_ctx *ctx, ir_ref ref)
{
ir_ref j, n, *p;
ir_use_list *use_list = &ctx->use_lists[ref];
ir_insn *insn;
use_list->refs = 0;
use_list->count = 0;
CLEAR_USES(ref);
insn = &ctx->ir_base[ref];
n = insn->inputs_count;
insn->opt = IR_NOP; /* keep "inputs_count" */
@@ -286,12 +283,9 @@ static void ir_sccp_make_nop(ir_ctx *ctx, ir_ref ref)
static void ir_sccp_remove_insn(ir_ctx *ctx, ir_insn *_values, ir_ref ref, ir_bitqueue *worklist)
{
ir_ref j, n, *p;
ir_use_list *use_list = &ctx->use_lists[ref];
ir_insn *insn;
use_list->refs = 0;
use_list->count = 0;
CLEAR_USES(ref);
insn = &ctx->ir_base[ref];
n = insn->inputs_count;
insn->opt = IR_NOP; /* keep "inputs_count" */
@@ -313,12 +307,9 @@ static void ir_sccp_remove_insn(ir_ctx *ctx, ir_insn *_values, ir_ref ref, ir_bi
static void ir_sccp_remove_insn2(ir_ctx *ctx, ir_ref ref, ir_bitqueue *worklist)
{
ir_ref j, n, *p;
ir_use_list *use_list = &ctx->use_lists[ref];
ir_insn *insn;
use_list->refs = 0;
use_list->count = 0;
CLEAR_USES(ref);
insn = &ctx->ir_base[ref];
n = insn->inputs_count;
insn->opt = IR_NOP; /* keep "inputs_count" */
@@ -392,8 +383,7 @@ static void ir_sccp_replace_insn(ir_ctx *ctx, ir_insn *_values, ir_ref ref, ir_r
}
}
use_list->refs = 0;
use_list->count = 0;
CLEAR_USES(ref);
}
static void ir_sccp_replace_insn2(ir_ctx *ctx, ir_ref ref, ir_ref new_ref, ir_bitqueue *worklist)
@@ -446,8 +436,7 @@ static void ir_sccp_replace_insn2(ir_ctx *ctx, ir_ref ref, ir_ref new_ref, ir_bi
ir_bitqueue_add(worklist, use);
}
use_list->refs = 0;
use_list->count = 0;
CLEAR_USES(ref);
}
static void ir_sccp_fold2(ir_ctx *ctx, ir_ref ref, ir_bitqueue *worklist)
@@ -488,26 +477,26 @@ restart:
IR_ASSERT(!IR_OP_HAS_VAR_INPUTS(ir_op_flags[opt & IR_OPT_OP_MASK]));
insn->inputs_count = IR_INPUT_EDGES_COUNT(ir_op_flags[opt & IR_OPT_OP_MASK]);
if (insn->op1 != ctx->fold_insn.op1) {
if (!IR_IS_CONST_REF(insn->op1) && insn->op1 != ctx->fold_insn.op2 && insn->op1 != ctx->fold_insn.op3) {
ir_use_list_remove_all(ctx, insn->op1, ref);
if (insn->op1 > 0) {
ir_use_list_remove_one(ctx, insn->op1, ref);
}
if (!IR_IS_CONST_REF(ctx->fold_insn.op1) && ctx->fold_insn.op1 != insn->op2 && ctx->fold_insn.op1 != insn->op3) {
if (ctx->fold_insn.op1 > 0) {
ir_use_list_add(ctx, ctx->fold_insn.op1, ref);
}
}
if (insn->op2 != ctx->fold_insn.op2) {
if (!IR_IS_CONST_REF(insn->op2) && insn->op2 != ctx->fold_insn.op1 && insn->op2 != ctx->fold_insn.op3) {
ir_use_list_remove_all(ctx, insn->op2, ref);
if (insn->op2 > 0) {
ir_use_list_remove_one(ctx, insn->op2, ref);
}
if (!IR_IS_CONST_REF(ctx->fold_insn.op2) && ctx->fold_insn.op2 != insn->op1 && ctx->fold_insn.op2 != insn->op3) {
if (ctx->fold_insn.op2 > 0) {
ir_use_list_add(ctx, ctx->fold_insn.op2, ref);
}
}
if (insn->op3 != ctx->fold_insn.op3) {
if (!IR_IS_CONST_REF(insn->op3) && insn->op3 != ctx->fold_insn.op1 && insn->op3 != ctx->fold_insn.op2) {
ir_use_list_remove_all(ctx, insn->op3, ref);
if (insn->op3 > 0) {
ir_use_list_remove_one(ctx, insn->op3, ref);
}
if (!IR_IS_CONST_REF(ctx->fold_insn.op3) && ctx->fold_insn.op3 != insn->op1 && ctx->fold_insn.op3 != insn->op2) {
if (ctx->fold_insn.op3 > 0) {
ir_use_list_add(ctx, ctx->fold_insn.op3, ref);
}
}
@@ -552,7 +541,7 @@ static void ir_sccp_remove_if(ir_ctx *ctx, ir_insn *_values, ir_ref ref, ir_ref
next_insn = &ctx->ir_base[next];
/* remove IF and IF_TRUE/FALSE from double linked control list */
next_insn->op1 = insn->op1;
ir_use_list_replace(ctx, insn->op1, ref, next);
ir_use_list_replace_one(ctx, insn->op1, ref, next);
/* remove IF and IF_TRUE/FALSE instructions */
ir_sccp_make_nop(ctx, ref);
ir_sccp_make_nop(ctx, use);
@@ -610,7 +599,7 @@ static void ir_sccp_remove_unfeasible_merge_inputs(ir_ctx *ctx, ir_insn *_values
IR_ASSERT(prev && next);
/* remove MERGE and input END from double linked control list */
next_insn->op1 = prev;
ir_use_list_replace(ctx, prev, input, next);
ir_use_list_replace_one(ctx, prev, input, next);
/* remove MERGE and input END instructions */
ir_sccp_make_nop(ctx, ref);
ir_sccp_make_nop(ctx, input);
@@ -648,7 +637,7 @@ static void ir_sccp_remove_unfeasible_merge_inputs(ir_ctx *ctx, ir_insn *_values
}
}
j = i;
while (j < n) {
while (j <= n) {
ir_insn_set_op(insn, j, IR_UNUSED);
j++;
}
@@ -766,10 +755,9 @@ static ir_ref ir_promote_d2f(ir_ctx *ctx, ir_ref ref, ir_ref use)
case IR_FP2FP:
ir_use_list_remove_all(ctx, ref, use);
if (ctx->use_lists[ref].count == 0) {
ir_use_list_replace(ctx, insn->op1, ref, use);
ir_use_list_replace_one(ctx, insn->op1, ref, use);
ref = insn->op1;
insn->optx = IR_NOP;
insn->op1 = IR_UNUSED;
MAKE_NOP(insn);
return ref;
} else {
ir_use_list_add(ctx, insn->op1, use);
@@ -817,10 +805,9 @@ static ir_ref ir_promote_f2d(ir_ctx *ctx, ir_ref ref, ir_ref use)
case IR_FP2FP:
ir_use_list_remove_all(ctx, ref, use);
if (ctx->use_lists[ref].count == 0) {
ir_use_list_replace(ctx, insn->op1, ref, use);
ir_use_list_replace_one(ctx, insn->op1, ref, use);
ref = insn->op1;
insn->optx = IR_NOP;
insn->op1 = IR_UNUSED;
MAKE_NOP(insn);
return ref;
} else {
ir_use_list_add(ctx, insn->op1, use);
@@ -903,10 +890,9 @@ static ir_ref ir_promote_i2i(ir_ctx *ctx, ir_type type, ir_ref ref, ir_ref use)
case IR_SEXT:
ir_use_list_remove_all(ctx, ref, use);
if (ctx->use_lists[ref].count == 0) {
ir_use_list_replace(ctx, insn->op1, ref, use);
ir_use_list_replace_one(ctx, insn->op1, ref, use);
ref = insn->op1;
insn->optx = IR_NOP;
insn->op1 = IR_UNUSED;
MAKE_NOP(insn);
return ref;
} else {
ir_use_list_add(ctx, insn->op1, use);
@@ -1272,11 +1258,13 @@ int ir_sccp(ir_ctx *ctx)
if (insn->type == IR_FLOAT) {
if (ir_may_promote_d2f(ctx, insn->op1)) {
ir_ref ref = ir_promote_d2f(ctx, insn->op1, i);
insn->op1 = ref;
ir_sccp_replace_insn2(ctx, i, ref, &worklist2);
}
} else {
if (ir_may_promote_f2d(ctx, insn->op1)) {
ir_ref ref = ir_promote_f2d(ctx, insn->op1, i);
insn->op1 = ref;
ir_sccp_replace_insn2(ctx, i, ref, &worklist2);
}
}
@@ -1295,6 +1283,7 @@ int ir_sccp(ir_ctx *ctx)
case IR_TRUNC:
if (ir_may_promote_i2i(ctx, insn->type, insn->op1)) {
ir_ref ref = ir_promote_i2i(ctx, insn->type, insn->op1, i);
insn->op1 = ref;
ir_sccp_replace_insn2(ctx, i, ref, &worklist2);
}
break;
@@ -1311,7 +1300,7 @@ int ir_sccp(ir_ctx *ctx)
/* remove LOAD from double linked control list */
ctx->ir_base[next].op1 = insn->op1;
ir_use_list_replace(ctx, insn->op1, i, next);
ir_use_list_replace_one(ctx, insn->op1, i, next);
insn->op1 = IR_UNUSED;
ir_sccp_remove_insn2(ctx, i, &worklist2);
}