Arguments†
- basic_block bb
- bool disable_tail_calls
Return value†
- basic_block
Source code†
/* Expand basic block BB from GIMPLE trees to RTL. */
static basic_block
expand_gimple_basic_block (basic_block bb, bool disable_tail_calls)
{
gimple_stmt_iterator gsi;
gimple_seq stmts;
gimple *stmt = NULL;
rtx_note *note = NULL;
rtx_insn *last;
edge e;
edge_iterator ei;
if (dump_file)
fprintf (dump_file, "\n;; Generating RTL for gimple basic block %d\n",
bb->index);
/* Note that since we are now transitioning from GIMPLE to RTL, we
cannot use the gsi_*_bb() routines because they expect the basic
block to be in GIMPLE, instead of RTL. Therefore, we need to
access the BB sequence directly. */
if (optimize)
reorder_operands (bb);
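/* BB->il is a union shared by the GIMPLE and RTL representations,
   so save the statement sequence into STMTS and detach it before
   init_rtl_bb_info below reuses the storage for RTL. */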
stmts = bb_seq (bb);
bb->il.gimple.seq = NULL;
bb->il.gimple.phi_nodes = NULL;
rtl_profile_for_bb (bb);
init_rtl_bb_info (bb);
bb->flags |= BB_RTL;
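/* From here on BB is an RTL block; the insns emitted below form its
   insn chain, delimited by BB_HEAD and BB_END at the end of this
   function. */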
/* Remove the RETURN_EXPR if we may fall through to the exit
   instead. */
gsi = gsi_last (stmts);
if (!gsi_end_p (gsi)
&& gimple_code (gsi_stmt (gsi)) == GIMPLE_RETURN)
{
greturn *ret_stmt = as_a <greturn *> (gsi_stmt (gsi));
gcc_assert (single_succ_p (bb));
gcc_assert (single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun));
if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
&& !gimple_return_retval (ret_stmt))
{
gsi_remove (&gsi, false);
single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
}
}
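/* If the block starts with a GIMPLE_LABEL, expand it first: in RTL
   a basic block may begin with a CODE_LABEL, and the basic-block
   note must come after it. LAB_RTX_FOR_BB holds a label that was
   already created for BB as a jump target (see label_rtx_for_bb). */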
gsi = gsi_start (stmts);
if (!gsi_end_p (gsi))
{
stmt = gsi_stmt (gsi);
if (gimple_code (stmt) != GIMPLE_LABEL)
stmt = NULL;
}
rtx_code_label **elt = lab_rtx_for_bb->get (bb);
if (stmt || elt)
{
gcc_checking_assert (!note);
last = get_last_insn ();
if (stmt)
{
expand_gimple_stmt (stmt);
gsi_next (&gsi);
}
if (elt)
emit_label (*elt);
BB_HEAD (bb) = NEXT_INSN (last);
if (NOTE_P (BB_HEAD (bb)))
BB_HEAD (bb) = NEXT_INSN (BB_HEAD (bb));
gcc_assert (LABEL_P (BB_HEAD (bb)));
note = emit_note_after (NOTE_INSN_BASIC_BLOCK, BB_HEAD (bb));
maybe_dump_rtl_for_gimple_stmt (stmt, last);
}
else
BB_HEAD (bb) = note = emit_note (NOTE_INSN_BASIC_BLOCK);
if (note)
NOTE_BASIC_BLOCK (note) = bb;
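/* Every RTL basic block is anchored by a NOTE_INSN_BASIC_BLOCK insn,
   and NOTE_BASIC_BLOCK makes that note point back at BB. Now expand
   the remaining statements one by one. */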
for (; !gsi_end_p (gsi); gsi_next (&gsi))
{
basic_block new_bb;
stmt = gsi_stmt (gsi);
/* If this statement is a non-debug one, and we generate debug
insns, then this one might be the last real use of a TERed
SSA_NAME, but where there are still some debug uses further
down. Expanding the current SSA name in such further debug
uses by their RHS might lead to wrong debug info, as coalescing
might make the operands of such RHS be placed into the same
pseudo as something else. Like so:
a_1 = a_0 + 1; // Assume a_1 is TERed and a_0 is dead
use(a_1);
a_2 = ...
#DEBUG ... => a_1
As a_0 and a_2 don't overlap in lifetime, assume they are coalesced.
If we were now to expand a_1 by its RHS (a_0 + 1) in the debug use,
the write to a_2 would actually have clobbered the place which
formerly held a_0.
So, instead of that, we recognize the situation, and generate
debug temporaries at the last real use of TERed SSA names:
a_1 = a_0 + 1;
#DEBUG #D1 => a_1
use(a_1);
a_2 = ...
#DEBUG ... => #D1
*/
if (MAY_HAVE_DEBUG_BIND_INSNS
&& SA.values
&& !is_gimple_debug (stmt))
{
ssa_op_iter iter;
tree op;
gimple *def;
location_t sloc = curr_insn_location ();
/* Look for SSA names that have their last use here (TERed
names always have only one real use). */
FOR_EACH_SSA_TREE_OPERAND (op, stmt, iter, SSA_OP_USE)
if ((def = get_gimple_for_ssa_name (op)))
{
imm_use_iterator imm_iter;
use_operand_p use_p;
bool have_debug_uses = false;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, op)
{
if (gimple_debug_bind_p (USE_STMT (use_p)))
{
have_debug_uses = true;
break;
}
}
if (have_debug_uses)
{
/* OP is a TERed SSA name, with DEF its defining
statement, and where OP is used in further debug
instructions. Generate a debug temporary, and
replace all uses of OP in debug insns with that
temporary. */
gimple *debugstmt;
tree value = gimple_assign_rhs_to_tree (def);
tree vexpr = make_node (DEBUG_EXPR_DECL);
rtx val;
machine_mode mode;
set_curr_insn_location (gimple_location (def));
DECL_ARTIFICIAL (vexpr) = 1;
TREE_TYPE (vexpr) = TREE_TYPE (value);
if (DECL_P (value))
mode = DECL_MODE (value);
else
mode = TYPE_MODE (TREE_TYPE (value));
SET_DECL_MODE (vexpr, mode);
val = gen_rtx_VAR_LOCATION
(mode, vexpr, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
emit_debug_insn (val);
FOR_EACH_IMM_USE_STMT (debugstmt, imm_iter, op)
{
if (!gimple_debug_bind_p (debugstmt))
continue;
FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
SET_USE (use_p, vexpr);
update_stmt (debugstmt);
}
}
}
set_curr_insn_location (sloc);
}
currently_expanding_gimple_stmt = stmt;
/* Expand this statement, then evaluate the resulting RTL and
fixup the CFG accordingly. */
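/* A GIMPLE_COND always ends its basic block. expand_gimple_cond
   may have to create a new block for the non-fallthru arm; if so,
   return it and let the caller resume expansion from there. */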
if (gimple_code (stmt) == GIMPLE_COND)
{
new_bb = expand_gimple_cond (bb, as_a <gcond *> (stmt));
if (new_bb)
return new_bb;
}
else if (is_gimple_debug (stmt))
{
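/* Expand a whole run of consecutive debug statements in this inner
   loop, so the current insn location only needs to be saved and
   restored once around it. */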
location_t sloc = curr_insn_location ();
gimple_stmt_iterator nsi = gsi;
for (;;)
{
tree var;
tree value = NULL_TREE;
rtx val = NULL_RTX;
machine_mode mode;
if (!gimple_debug_nonbind_marker_p (stmt))
{
if (gimple_debug_bind_p (stmt))
{
var = gimple_debug_bind_get_var (stmt);
if (TREE_CODE (var) != DEBUG_EXPR_DECL
&& TREE_CODE (var) != LABEL_DECL
&& !target_for_debug_bind (var))
goto delink_debug_stmt;
if (DECL_P (var))
mode = DECL_MODE (var);
else
mode = TYPE_MODE (TREE_TYPE (var));
if (gimple_debug_bind_has_value_p (stmt))
value = gimple_debug_bind_get_value (stmt);
val = gen_rtx_VAR_LOCATION
(mode, var, (rtx)value, VAR_INIT_STATUS_INITIALIZED);
}
else if (gimple_debug_source_bind_p (stmt))
{
var = gimple_debug_source_bind_get_var (stmt);
value = gimple_debug_source_bind_get_value (stmt);
mode = DECL_MODE (var);
val = gen_rtx_VAR_LOCATION (mode, var, (rtx)value,
VAR_INIT_STATUS_UNINITIALIZED);
}
else
gcc_unreachable ();
}
/* If this function was first compiled with markers
   enabled, but they're now disabled (e.g. LTO), drop
   them on the floor. */
else if (gimple_debug_nonbind_marker_p (stmt)
&& !MAY_HAVE_DEBUG_MARKER_INSNS)
goto delink_debug_stmt;
else if (gimple_debug_begin_stmt_p (stmt))
val = GEN_RTX_DEBUG_MARKER_BEGIN_STMT_PAT ();
else if (gimple_debug_inline_entry_p (stmt))
{
tree block = gimple_block (stmt);
if (block)
val = GEN_RTX_DEBUG_MARKER_INLINE_ENTRY_PAT ();
else
goto delink_debug_stmt;
}
else
gcc_unreachable ();
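/* VAL now holds the pattern for the debug insn: a VAR_LOCATION for
   (source) bind statements, or a bare marker pattern for begin-stmt
   and inline-entry markers. */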
last = get_last_insn ();
set_curr_insn_location (gimple_location (stmt));
emit_debug_insn (val);
if (dump_file && (dump_flags & TDF_DETAILS))
{
/* We can't dump the insn with a TREE where an RTX
is expected. */
if (GET_CODE (val) == VAR_LOCATION)
{
gcc_checking_assert (PAT_VAR_LOCATION_LOC (val) == (rtx)value);
PAT_VAR_LOCATION_LOC (val) = const0_rtx;
}
maybe_dump_rtl_for_gimple_stmt (stmt, last);
if (GET_CODE (val) == VAR_LOCATION)
PAT_VAR_LOCATION_LOC (val) = (rtx)value;
}
delink_debug_stmt:
/* In order not to generate too many debug temporaries,
we delink all uses of debug statements we already expanded.
Therefore debug statements between definition and real
use of TERed SSA names will continue to use the SSA name,
and not be replaced with debug temps. */
delink_stmt_imm_use (stmt);
gsi = nsi;
gsi_next (&nsi);
if (gsi_end_p (nsi))
break;
stmt = gsi_stmt (nsi);
if (!is_gimple_debug (stmt))
break;
}
set_curr_insn_location (sloc);
}
else
{
gcall *call_stmt = dyn_cast <gcall *> (stmt);
if (call_stmt
&& gimple_call_tail_p (call_stmt)
&& disable_tail_calls)
gimple_call_set_tail (call_stmt, false);
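/* A tail call ends the block. expand_gimple_tailcall returns a new
   block when it emitted an actual tail-call sequence; CAN_FALLTHRU
   tells whether control may still continue past the call, in which
   case expansion resumes in the new block. */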
if (call_stmt && gimple_call_tail_p (call_stmt))
{
bool can_fallthru;
new_bb = expand_gimple_tailcall (bb, call_stmt, &can_fallthru);
if (new_bb)
{
if (can_fallthru)
bb = new_bb;
else
return new_bb;
}
}
else
{
def_operand_p def_p;
def_p = SINGLE_SSA_DEF_OPERAND (stmt, SSA_OP_DEF);
if (def_p != NULL)
{
/* Ignore this stmt if it is in the list of
   replaceable expressions: its computation has been
   TERed and will be expanded at its single real use
   site instead, so no insn is emitted here. */
if (SA.values
&& bitmap_bit_p (SA.values,
SSA_NAME_VERSION (DEF_FROM_PTR (def_p))))
continue;
}
last = expand_gimple_stmt (stmt);
maybe_dump_rtl_for_gimple_stmt (stmt, last);
}
}
}
currently_expanding_gimple_stmt = NULL;
/* Expand implicit goto and convert goto_locus. */
FOR_EACH_EDGE (e, ei, bb->succs)
{
if (e->goto_locus != UNKNOWN_LOCATION)
set_curr_insn_location (e->goto_locus);
if ((e->flags & EDGE_FALLTHRU) && e->dest != bb->next_bb)
{
emit_jump (label_rtx_for_bb (e->dest));
e->flags &= ~EDGE_FALLTHRU;
}
}
/* Expanded RTL can create a jump as the last instruction of the block.
   Later passes might assume that jump leads to the successor block and
   break edge insertion, so insert a dummy move to prevent this. PR41440. */
if (single_succ_p (bb)
&& (single_succ_edge (bb)->flags & EDGE_FALLTHRU)
&& (last = get_last_insn ())
&& (JUMP_P (last)
|| (DEBUG_INSN_P (last)
&& JUMP_P (prev_nondebug_insn (last)))))
{
rtx dummy = gen_reg_rtx (SImode);
emit_insn_after_noloc (gen_move_insn (dummy, dummy), last, NULL);
}
do_pending_stack_adjust ();
/* Find the block tail. The last insn in the block is the insn
   before a barrier and/or table jump insn. A JUMP_TABLE_DATA insn
   is preceded by its CODE_LABEL, so step back over both to reach
   the jump itself. */
last = get_last_insn ();
if (BARRIER_P (last))
last = PREV_INSN (last);
if (JUMP_TABLE_DATA_P (last))
last = PREV_INSN (PREV_INSN (last));
BB_END (bb) = last;
update_bb_for_insn (bb);
return bb;
}
Comments†
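For reference, here is a minimal sketch of how this function is driven, simplified from pass_expand::execute in cfgexpand.cc (setup such as init_block creation and the var_ret_seq handling is omitted):

 lab_rtx_for_bb = new hash_map<basic_block, rtx_code_label *>;
 FOR_BB_BETWEEN (bb, init_block->next_bb, EXIT_BLOCK_PTR_FOR_FN (cfun),
                 next_bb)
   bb = expand_gimple_basic_block (bb, var_ret_seq != NULL_RTX);

Each call may return a block other than the one passed in (after tail-call or conditional-jump expansion split the block), and the loop then continues from the returned block.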