GCC Middle and Back End API Reference
sched-int.h File Reference

Go to the source code of this file.

Data Structures

struct  common_sched_info_def
struct  ready_list
struct  dep_replacement
struct  _dep
struct  _dep_link
struct  _deps_list
struct  _dep_node
struct  deps_reg
struct  deps_desc
struct  haifa_sched_info
struct  spec_info_def
struct  _haifa_deps_insn_data
struct  reg_pressure_data
struct  reg_use_data
struct  reg_set_data
struct  _haifa_insn_data
struct  sched_deps_info_def
struct  region
struct  _sd_iterator


typedef vec< basic_block > bb_vec_t
typedef vec< rtx > insn_vec_t
typedef vec< rtx > rtx_vec_t
typedef unsigned int ds_t
typedef unsigned int dw_t
typedef struct _dep dep_def
typedef dep_def * dep_t
typedef struct _dep_node * dep_node_t
typedef struct _dep_link * dep_link_t
typedef struct _deps_list * deps_list_t
typedef struct deps_desc * deps_t
typedef struct spec_info_def * spec_info_t
typedef struct _haifa_insn_data haifa_insn_data_def
typedef haifa_insn_data_def * haifa_insn_data_t
typedef struct _haifa_deps_insn_data haifa_deps_insn_data_def
typedef haifa_deps_insn_data_def * haifa_deps_insn_data_t
typedef int sd_list_types_def
typedef struct _sd_iterator sd_iterator_def


enum  sched_pass_id_t { SCHED_PASS_UNKNOWN, SCHED_RGN_PASS, SCHED_EBB_PASS, SCHED_SMS_PASS, SCHED_SEL_PASS }
enum  reg_pending_barrier_mode { NOT_A_BARRIER = 0, MOVE_BARRIER, TRUE_BARRIER }
enum  post_call_group { not_post_call, post_call, post_call_initial }


void sched_init_bbs (void)
void sched_extend_luids (void)
void sched_init_insn_luid (rtx)
void sched_init_luids (bb_vec_t)
void sched_finish_luids (void)
void sched_extend_target (void)
void haifa_init_h_i_d (bb_vec_t)
void haifa_finish_h_i_d (void)
static bool sel_sched_p ()
int get_rgn_sched_max_insns_priority (void)
void sel_add_to_insn_priority (rtx, int)
int insn_luid (rtx)
void remove_notes (rtx, rtx)
rtx restore_other_notes (rtx, basic_block)
void sched_insns_init (rtx)
void sched_insns_finish (void)
void * xrecalloc (void *, size_t, size_t, size_t)
void reemit_notes (rtx)
int haifa_classify_insn (const_rtx)
void sel_find_rgns (void)
void sel_mark_hard_insn (rtx)
void advance_state (state_t)
void setup_sched_dump (void)
void sched_init (void)
void sched_finish (void)
bool sel_insn_is_speculation_check (rtx)
int max_issue (struct ready_list *, int, state_t, bool, int *)
void ebb_compute_jump_reg_dependencies (rtx, regset)
edge find_fallthru_edge_from (basic_block)
basic_block sched_split_block_1 (basic_block, rtx)
basic_block sched_create_empty_bb_1 (basic_block)
basic_block sched_create_recovery_block (basic_block *)
void sched_create_recovery_edges (basic_block, basic_block, basic_block)
enum reg_note ds_to_dk (ds_t)
ds_t dk_to_ds (enum reg_note)
void init_dep_1 (dep_t, rtx, rtx, enum reg_note, ds_t)
void init_dep (dep_t, rtx, rtx, enum reg_note)
void sd_debug_dep (dep_t)
rtx sched_get_reverse_condition_uncached (const_rtx)
bool sched_insns_conditions_mutex_p (const_rtx, const_rtx)
bool sched_insn_is_legitimate_for_speculation_p (const_rtx, ds_t)
void add_dependence (rtx, rtx, enum reg_note)
void sched_analyze (struct deps_desc *, rtx, rtx)
void init_deps (struct deps_desc *, bool)
void init_deps_reg_last (struct deps_desc *)
void free_deps (struct deps_desc *)
void init_deps_global (void)
void finish_deps_global (void)
void deps_analyze_insn (struct deps_desc *, rtx)
void remove_from_deps (struct deps_desc *, rtx)
void init_insn_reg_pressure_info (rtx)
dw_t get_dep_weak (ds_t, ds_t)
ds_t set_dep_weak (ds_t, ds_t, dw_t)
dw_t estimate_dep_weak (rtx, rtx)
ds_t ds_merge (ds_t, ds_t)
ds_t ds_full_merge (ds_t, ds_t, rtx, rtx)
ds_t ds_max_merge (ds_t, ds_t)
dw_t ds_weak (ds_t)
ds_t ds_get_speculation_types (ds_t)
ds_t ds_get_max_dep_weak (ds_t)
void sched_deps_init (bool)
void sched_deps_finish (void)
void haifa_note_reg_set (int)
void haifa_note_reg_clobber (int)
void haifa_note_reg_use (int)
void maybe_extend_reg_info_p (void)
void deps_start_bb (struct deps_desc *, rtx)
enum reg_note ds_to_dt (ds_t)
bool deps_pools_are_empty_p (void)
void sched_free_deps (rtx, rtx, bool)
void extend_dependency_caches (int, bool)
void debug_ds (ds_t)
void sched_init_region_reg_pressure_info (void)
void get_ebb_head_tail (basic_block, basic_block, rtx *, rtx *)
int no_real_insns_p (const_rtx, const_rtx)
int insn_cost (rtx)
int dep_cost_1 (dep_t, dw_t)
int dep_cost (dep_t)
int set_priorities (rtx, rtx)
void sched_setup_bb_reg_pressure_info (basic_block, rtx)
bool schedule_block (basic_block *, state_t)
void ready_sort (struct ready_list *)
rtx ready_element (struct ready_list *, int)
rtx * ready_lastpos (struct ready_list *)
int try_ready (rtx)
void sched_extend_ready_list (int)
void sched_finish_ready_list (void)
void sched_change_pattern (rtx, rtx)
int sched_speculate_insn (rtx, ds_t, rtx *)
void unlink_bb_notes (basic_block, basic_block)
void add_block (basic_block, basic_block)
rtx bb_note (basic_block)
void concat_note_lists (rtx, rtx *)
rtx sched_emit_insn (rtx)
rtx get_ready_element (int)
int number_in_ready (void)
basic_block schedule_ebb (rtx, rtx, bool)
void schedule_ebbs_init (void)
void schedule_ebbs_finish (void)
void set_modulo_params (int, int, int, int)
void record_delay_slot_pair (rtx, rtx, int, int)
rtx real_insn_for_shadow (rtx)
void discard_delay_pairs_above (int)
void free_delay_pairs (void)
void add_delay_dependencies (rtx)
bool sched_is_disabled_for_current_region_p (void)
void sched_rgn_init (bool)
void sched_rgn_finish (void)
void rgn_setup_region (int)
void sched_rgn_compute_dependencies (int)
void sched_rgn_local_init (int)
void sched_rgn_local_finish (void)
void sched_rgn_local_free (void)
void extend_regions (void)
void rgn_make_new_region_out_of_new_block (basic_block)
void compute_priorities (void)
void increase_insn_priority (rtx, int)
void debug_rgn_dependencies (int)
void debug_dependencies (rtx, rtx)
void free_rgn_deps (void)
int contributes_to_priority (rtx, rtx)
void extend_rgns (int *, int *, sbitmap, int *)
void deps_join (struct deps_desc *, struct deps_desc *)
void rgn_setup_common_sched_info (void)
void rgn_setup_sched_infos (void)
void debug_regions (void)
void debug_region (int)
void dump_region_dot (FILE *, int)
void dump_region_dot_file (const char *, int)
void haifa_sched_init (void)
void haifa_sched_finish (void)
void find_modifiable_mems (rtx, rtx)
void sd_next_list (const_rtx, sd_list_types_def *, deps_list_t *, bool *)
static sd_iterator_def sd_iterator_start ()
static bool sd_iterator_cond ()
static void sd_iterator_next ()
int sd_lists_size (const_rtx, sd_list_types_def)
bool sd_lists_empty_p (const_rtx, sd_list_types_def)
void sd_init_insn (rtx)
void sd_finish_insn (rtx)
dep_t sd_find_dep_between (rtx, rtx, bool)
void sd_add_dep (dep_t, bool)
enum DEPS_ADJUST_RESULT sd_add_or_update_dep (dep_t, bool)
void sd_resolve_dep (sd_iterator_def)
void sd_unresolve_dep (sd_iterator_def)
void sd_copy_back_deps (rtx, rtx, bool)
void sd_delete_dep (sd_iterator_def)
void sd_debug_lists (rtx, sd_list_types_def)


struct common_sched_info_def * common_sched_info
struct common_sched_info_def haifa_common_sched_info
int sched_emulate_haifa_p
vec< int > sched_luids
int sched_max_luid
rtx note_list
size_t dfa_state_size
char * ready_try
struct ready_list ready
void(* sched_init_only_bb )(basic_block, basic_block)
basic_block(* sched_split_block )(basic_block, rtx)
basic_block(* sched_create_empty_bb )(basic_block)
state_t curr_state
spec_info_t spec_info
struct haifa_sched_info * current_sched_info
enum sched_pressure_algorithm sched_pressure
enum reg_class * sched_regno_pressure_class
vec< haifa_insn_data_def > h_i_d
vec< haifa_deps_insn_data_def > h_d_i_d
FILE * sched_dump
int sched_verbose
bool haifa_recovery_bb_ever_added_p
struct sched_deps_info_def * sched_deps_info
int cycle_issued_insns
int issue_rate
int dfa_lookahead
int nr_regions
int * rgn_bb_table
int * block_to_bb
int * containing_rgn
int * ebb_head
int current_nr_blocks
int current_blocks
int target_bb
bool sched_no_dce

Typedef Documentation

typedef struct _dep dep_def
typedef struct _dep_link* dep_link_t
typedef struct _dep_node* dep_node_t
typedef dep_def* dep_t
typedef struct _deps_list* deps_list_t
typedef struct deps_desc* deps_t
typedef unsigned int ds_t
   Type to represent status of a dependence.  
typedef unsigned int dw_t
   Type to represent weakness of speculative dependence.  
typedef vec<rtx> insn_vec_t
typedef vec<rtx> rtx_vec_t
typedef struct _sd_iterator sd_iterator_def
typedef int sd_list_types_def
   A type to hold above flags.  
typedef struct spec_info_def* spec_info_t

Enumeration Type Documentation

   This represents the results of calling sched-deps.c functions,
   which modify dependencies.  
     No dependence needed (e.g. producer == consumer).  
     Dependence is already present and wasn't modified.  
     Existing dependence was modified to include additional information.  
     New dependence has been created.  
   Exception Free Loads:

   We define five classes of speculative loads: IFREE, IRISKY,
   PFREE, PRISKY, and MFREE.

   IFREE loads are loads that are proved to be exception-free, just
   by examining the load insn.  Examples for such loads are loads
   from TOC and loads of global data.

   IRISKY loads are loads that are proved to be exception-risky,
   just by examining the load insn.  Examples for such loads are
   volatile loads and loads from shared memory.

   PFREE loads are loads for which we can prove, by examining other
   insns, that they are exception-free.  Currently, this class consists
   of loads for which we are able to find a "similar load", either in
   the target block, or, if only one split-block exists, in that split
   block.  Load2 is similar to load1 if both have same single base
   register.  We identify only part of the similar loads, by finding
   an insn upon which both load1 and load2 have a DEF-USE dependence.

   PRISKY loads are loads for which we can prove, by examining other
   insns, that they are exception-risky.  Currently we have two proofs for
   such loads.  The first proof detects loads that are probably guarded by a
   test on the memory address.  This proof is based on the
   backward and forward data dependence information for the region.
   Let load-insn be the examined load.
   Load-insn is PRISKY iff ALL the following hold:

   - insn1 is not in the same block as load-insn
   - there is a DEF-USE dependence chain (insn1, ..., load-insn)
   - test-insn is either a compare or a branch, not in the same block
     as load-insn
   - load-insn is reachable from test-insn
   - there is a DEF-USE dependence chain (insn1, ..., test-insn)

   This proof might fail when the compare and the load are fed
   by an insn not in the region.  To solve this, we will add to this
   group all loads that have no input DEF-USE dependence.

   The second proof detects loads that are directly or indirectly
   fed by a speculative load.  This proof is affected by the
   scheduling process.  We will use the flag  fed_by_spec_load.
   Initially, all insns have this flag reset.  After a speculative
   motion of an insn, if insn is either a load, or marked as
   fed_by_spec_load, we will also mark as fed_by_spec_load every
   insn1 for which a DEF-USE dependence (insn, insn1) exists.  A
   load which is fed_by_spec_load is also PRISKY.

   MFREE (maybe-free) loads are all the remaining loads. They may be
   exception-free, but we cannot prove it.

   Now, all loads in IFREE and PFREE classes are considered
   exception-free, while all loads in IRISKY and PRISKY classes are
   considered exception-risky.  As for loads in the MFREE class,
   these are considered either exception-free or exception-risky,
   depending on whether we are pessimistic or optimistic.  We have
   to take the pessimistic approach to assure the safety of
   speculative scheduling, but we can take the optimistic approach
   by invoking the -fsched-spec-load-dangerous option.  
   Whether a register movement is associated with a call.  
   The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  
   Represents the bits that can be set in the flags field of the
   sched_info structure.  
     If set, generate links between instruction as DEPS_LIST.
     Otherwise, generate usual INSN_LIST links.  
     Perform data or control (or both) speculation.
     Results in generation of data and control speculative dependencies.
     Requires USE_DEPS_LIST set.  
     Scheduler can possibly create new basic blocks.  Used for assertions.  
   Identifier of a scheduler pass.  
   The algorithm used to implement -fsched-pressure.  
   Offset for speculative weaknesses in dep_status.  

Function Documentation

void add_block ( basic_block  ,

Referenced by loe_visit_block().

void add_delay_dependencies ( rtx  )
void add_dependence ( rtx  ,
rtx  ,
enum  reg_note 
void advance_state ( state_t  )
rtx bb_note ( basic_block  )
void compute_priorities ( void  )
   Compute insn priority for a current region.  
void concat_note_lists ( rtx  ,
int contributes_to_priority ( rtx  ,
void debug_dependencies ( rtx  head,
rtx  tail 
   Print dependencies information for instructions between HEAD and TAIL.
   ??? This function would probably fit best in haifa-sched.c.  
void debug_ds ( ds_t  )
void debug_region ( int  )
void debug_regions ( void  )
   Functions for the construction of regions.  
   Print the regions, for debugging purposes.  Callable from debugger.  
         We don't have ebb_head initialized yet, so we can't use
         BB_TO_BLOCK ().  

References current_blocks, dump_bb(), and rgn_bb_table.

Referenced by free_rgn_deps().

void debug_rgn_dependencies ( int  )
int dep_cost ( dep_t  )
int dep_cost_1 ( dep_t  ,
void deps_analyze_insn ( struct deps_desc ,
void deps_join ( struct deps_desc ,
struct deps_desc  

Referenced by add_branch_dependences().

bool deps_pools_are_empty_p ( void  )
   Return true if there is no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  
void deps_start_bb ( struct deps_desc ,
void discard_delay_pairs_above ( int  )
ds_t dk_to_ds ( enum  reg_note)
ds_t ds_full_merge ( ds_t  ,
ds_t  ,
rtx  ,
ds_t ds_get_max_dep_weak ( ds_t  )
ds_t ds_get_speculation_types ( ds_t  )
ds_t ds_max_merge ( ds_t  ,
ds_t ds_merge ( ds_t  ,
enum reg_note ds_to_dk ( ds_t  )
enum reg_note ds_to_dt ( ds_t  )
dw_t ds_weak ( ds_t  )
void dump_region_dot ( FILE *  ,
void dump_region_dot_file ( const char *  ,
void ebb_compute_jump_reg_dependencies ( rtx  ,
dw_t estimate_dep_weak ( rtx  ,
void extend_dependency_caches ( int  ,
void extend_regions ( void  )
   Extend internal data structures.  
void extend_rgns ( int *  ,
int *  ,
sbitmap  ,
int *   
edge find_fallthru_edge_from ( basic_block  )
void find_modifiable_mems ( rtx  ,
void finish_deps_global ( void  )
   Free everything used by the dependency analysis code.  
void free_delay_pairs ( void  )
   Free all delay_pair structures that were recorded.  

References create_check_block_twin(), nr_begin_control, and nr_begin_data.

void free_deps ( struct deps_desc )
void free_rgn_deps ( void  )
   Free all region dependencies saved in INSN_BACK_DEPS and
   INSN_RESOLVED_BACK_DEPS.  The Haifa scheduler does this on the fly
   when scheduling, so this function is supposed to be called from
   the selective scheduling only.  

References calculate_dominance_info(), CDI_DOMINATORS, debug_regions(), find_rgns(), free_dominance_info(), sched_verbose, and sel_sched_p().

dw_t get_dep_weak ( ds_t  ,
void get_ebb_head_tail ( basic_block  ,
basic_block  ,
rtx ,
rtx get_ready_element ( int  )
int get_rgn_sched_max_insns_priority ( void  )
   Returns maximum priority that an insn was assigned to.  
int haifa_classify_insn ( const_rtx  )
   Functions in haifa-sched.c.  
void haifa_finish_h_i_d ( void  )
   Finalize haifa_insn_data.  
void haifa_init_h_i_d ( bb_vec_t  )
void haifa_note_reg_clobber ( int  )
void haifa_note_reg_set ( int  )
void haifa_note_reg_use ( int  )
void haifa_sched_finish ( void  )
   Finish work with the data specific to the Haifa scheduler.  
     Finalize h_i_d, dependency caches, and luids for the whole
     function.  Target will be finalized in md_global_finish ().  

References add_to_speculative_block(), and begin_speculative_block().

void haifa_sched_init ( void  )
   Initialize data structures specific to the Haifa scheduler.  
     Initialize luids, dependency caches, target and h_i_d for the
     whole function.  

References dfa_state_size, choice_entry::state, and targetm.

Referenced by loop_canon_p().

void increase_insn_priority ( rtx  ,
void init_dep ( dep_t  ,
rtx  ,
rtx  ,
enum  reg_note 
void init_dep_1 ( dep_t  ,
rtx  ,
rtx  ,
enum  reg_note,
   Functions to work with dep.  
void init_deps ( struct deps_desc ,
void init_deps_global ( void  )
   Initialize some global variables needed by the dependency analysis

References get_dep_weak().

Referenced by add_inter_loop_mem_dep(), and code_motion_process_successors().

void init_deps_reg_last ( struct deps_desc )
void init_insn_reg_pressure_info ( rtx  )
int insn_cost ( rtx  )
int insn_luid ( rtx  )
int max_issue ( struct ready_list ready,
int  privileged_n,
state_t  state,
bool  first_cycle_insn_p,
int *  index 
   The following function returns maximal (or close to maximal) number
   of insns which can be issued on the same cycle and one of which
   insns is insns with the best rank (the first insn in READY).  To
   make this function tries different samples of ready insns.  READY
   is current queue `ready'.  Global array READY_TRY reflects what
   insns are already issued in this try.  The function stops immediately,
   if it reached the such a solution, that all instruction can be issued.
   INDEX will contain index of the best insn in READY.  The following
   function is used only for first cycle multipass scheduling.


   This function expects recognized insns only.  All USEs,
   CLOBBERs, etc must be filtered elsewhere.  
     Init max_points.  
     The number of the issued insns in the best solution.  
     Set initial state of the search.  
     Count the number of the insns to search among.  
     I is the index of the insn to try next.  
             or have nothing else to try...  
             or should not issue more.  
             ??? (... || i == n_ready).  
             We should not issue more than issue_rate instructions.  
                     Try to find issued privileged insn.  
                     Or a privileged insn will be issued.  
                   Then we have a solution.  
                     This is the index of the insn issued first in this
             Set ready-list index to point to the last insn
             ('i++' below will advance it to the next insn).  
                   We won't issue any more instructions in the next
                 Advance to the next choice_entry.  
                 Initialize it.  
         Increase ready-list index.  
     Restore the original state of the DFA.  

References asm_noperands(), clock_var, dfa_state_size, estimate_shadow_tick(), hash_table< Descriptor, Allocator >::find_with_hash(), hash_table< Descriptor, Allocator >::is_created(), memcpy(), choice_entry::n, ready_list::n_ready, delay_pair::next_same_i1, queue_insn(), ready_element(), ready_remove(), recog_memoized(), SCHED_PRESSURE_MODEL, and SCHED_PRESSURE_NONE.

void maybe_extend_reg_info_p ( void  )
   Extends REG_INFO_P if needed.  
     Extend REG_INFO_P, if needed.  

Referenced by mark_reg_death(), and undo_transformations().

int no_real_insns_p ( const_rtx  ,
int number_in_ready ( void  )
   Get number of ready insn in the ready list.  
rtx ready_element ( struct ready_list ,
rtx* ready_lastpos ( struct ready_list )
void ready_sort ( struct ready_list )
rtx real_insn_for_shadow ( rtx  )
void record_delay_slot_pair ( rtx  ,
rtx  ,
int  ,
void reemit_notes ( rtx  )
void remove_from_deps ( struct deps_desc ,
void remove_notes ( rtx  ,
rtx restore_other_notes ( rtx  ,
void rgn_make_new_region_out_of_new_block ( basic_block  )
void rgn_setup_common_sched_info ( void  )
   Setup scheduler infos.  

References RTL_PASS.

void rgn_setup_region ( int  )
void rgn_setup_sched_infos ( void  )
   Setup all *_sched_info structures (for the Haifa frontend
   and for the dependence analysis) in the interblock scheduler.  

Referenced by code_motion_process_successors().

void sched_analyze ( struct deps_desc ,
rtx  ,
void sched_change_pattern ( rtx  ,
basic_block sched_create_empty_bb_1 ( basic_block  )
basic_block sched_create_recovery_block ( basic_block )
void sched_create_recovery_edges ( basic_block  first_bb,
basic_block  rec,
basic_block  second_bb 
   and emit necessary jumps.  
     This is fixing of incoming edge.  
     ??? Which other flags should be specified?  
       Partition type is the same, if it is "unpartitioned".  
       Partition type is the same, if it is "unpartitioned".  
         Rewritten from cfgrtl.c.  
             We don't need the same note for the check because
             any_condjump_p (check) == true.  
void sched_deps_finish ( void  )
   Finalize dependency information for the whole function.  

References ds_merge(), estimate_dep_weak(), and set_dep_weak().

void sched_deps_init ( bool  )
rtx sched_emit_insn ( rtx  )
void sched_extend_luids ( void  )
   Extend data structures for logical insn UID.  
void sched_extend_ready_list ( int  )
void sched_extend_target ( void  )
   Extend per insn data in the target.  
void sched_finish ( void  )
   Free global data used during insn scheduling.  This function works with
   the common data shared between the schedulers.  

References sched_insn_is_legitimate_for_speculation_p().

void sched_finish_luids ( void  )
   Free LUIDs.  
void sched_finish_ready_list ( void  )
   Free per region data structures.  
void sched_free_deps ( rtx  ,
rtx  ,
rtx sched_get_reverse_condition_uncached ( const_rtx  )
   Functions in sched-deps.c.  
void sched_init ( void  )
   Initialize some global state for the scheduler.  This function works
   with the common data shared between all the schedulers.  It is called
   from the scheduler specific initialization routine.  
     Disable speculative loads in their presence if cc0 defined.  
     Initialize SPEC_INFO.  
           So we won't read anything accidentally.  
       So we won't read anything accidentally.  
     Initialize issue_rate.  
         To invalidate max_lookahead_tries:  
     More problems needed for interloop dep calculation in SMS.  
     Do not run DCE after reload, as this can kill nops inserted
     by bundling.  
           We need info about pseudos for rtl dumps about pseudo
           classes and costs.  
void sched_init_bbs ( void  )
   Init per basic block data structures.  

References DO_SPECULATION, and haifa_sched_info::flags.

void sched_init_insn_luid ( rtx  )
void sched_init_luids ( bb_vec_t  )
void sched_init_region_reg_pressure_info ( void  )
   Functions in haifa-sched.c.  
   Initiate register pressure relative info for scheduling the current
   region.  Currently it is only clearing register mentioned in the
   current region.  
bool sched_insn_is_legitimate_for_speculation_p ( const_rtx  ,
bool sched_insns_conditions_mutex_p ( const_rtx  ,
void sched_insns_finish ( void  )
void sched_insns_init ( rtx  )
bool sched_is_disabled_for_current_region_p ( void  )
   Returns true if all the basic blocks of the current region have
   NOTE_DISABLE_SCHED_OF_BLOCK which means not to schedule that region.  
void sched_rgn_compute_dependencies ( int  )
void sched_rgn_finish ( void  )
   Free data structures for region scheduling.  
     Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  
void sched_rgn_init ( bool  )
void sched_rgn_local_finish ( void  )
   Free data computed for the finished region.  
void sched_rgn_local_free ( void  )
   Free data computed for the finished region.  

References maybe_skip_selective_scheduling(), and run_selective_scheduling().

void sched_rgn_local_init ( int  )
void sched_setup_bb_reg_pressure_info ( basic_block  ,
int sched_speculate_insn ( rtx  ,
ds_t  ,
basic_block sched_split_block_1 ( basic_block  ,
bool schedule_block ( basic_block ,
basic_block schedule_ebb ( rtx  ,
rtx  ,
   Types and functions in sched-ebb.c.  
void schedule_ebbs_finish ( void  )
   Perform cleanups after scheduling using schedules_ebbs or schedule_ebb.  
     Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  
void schedule_ebbs_init ( void  )
   Perform initializations before running schedule_ebbs or a single
     Setup infos.  
     Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  
void sd_add_dep ( dep_t  ,
enum DEPS_ADJUST_RESULT sd_add_or_update_dep ( dep_t  ,
void sd_copy_back_deps ( rtx  ,
rtx  ,
void sd_debug_dep ( dep_t  )
void sd_debug_lists ( rtx  ,
void sd_delete_dep ( sd_iterator_def  )
dep_t sd_find_dep_between ( rtx  ,
rtx  ,
void sd_finish_insn ( rtx  )
void sd_init_insn ( rtx  )
static bool sd_iterator_cond ( )
   Return the current element.  
           Switch to next list.  

Referenced by deps_analyze_insn(), and ds_get_speculation_types().

static void sd_iterator_next ( )
   Advance iterator.  

Referenced by save_reg_pressure().

static sd_iterator_def sd_iterator_start ( )
   ??? We can move some definitions that are used in below inline functions
   out of sched-int.h to sched-deps.c provided that the below functions will
   become global externals.
   These definitions include:
   * struct _deps_list: opaque pointer is needed at global scope.
   * struct _dep_link: opaque pointer is needed at scope of sd_iterator_def.
   * struct _dep_node: opaque pointer is needed at scope of
   struct _deps_link.  
   Return initialized iterator.  
     Some dep_link a pointer to which will return NULL.  
     Avoid 'uninitialized warning'.  

Referenced by deps_analyze_insn(), and ds_get_speculation_types().

bool sd_lists_empty_p ( const_rtx  ,
int sd_lists_size ( const_rtx  ,
void sd_next_list ( const_rtx  insn,
sd_list_types_def types_ptr,
deps_list_t list_ptr,
bool *  resolved_p_ptr 
   Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  

References deps_list_empty_p(), and sd_next_list().

Referenced by sd_next_list().

void sd_resolve_dep ( sd_iterator_def  )
void sd_unresolve_dep ( sd_iterator_def  )
void sel_add_to_insn_priority ( rtx  insn,
int  amount 
   Increases effective priority for INSN by AMOUNT.  

References targetm.

void sel_find_rgns ( void  )
   Functions in sel-sched-ir.c.  
bool sel_insn_is_speculation_check ( rtx  )
void sel_mark_hard_insn ( rtx  )
static bool sel_sched_p ( )
   Return true if selective scheduling pass is working.  

Referenced by free_rgn_deps(), and mark_reg_death().

ds_t set_dep_weak ( ds_t  ,
ds_t  ,
void set_modulo_params ( int  ,
int  ,
int  ,
int set_priorities ( rtx  ,
void setup_sched_dump ( void  )
   Set dump and sched_verbose for the desired debugging output.  If no
   dump-file was specified, but -fsched-verbose=N (any N), print to stderr.
   For -fsched-verbose=N, N>=10, print everything to stderr.  
int try_ready ( rtx  )
void unlink_bb_notes ( basic_block  ,
void* xrecalloc ( void *  ,
size_t  ,
size_t  ,

Variable Documentation

int* block_to_bb
   Topological order of blocks in the region (if b2 is reachable from
   b1, block_to_bb[b2] > block_to_bb[b1]).  Note: A basic block is
   always referred to by either block or b, while its topological
   order name (in the region) is referred to by bb.  
struct common_sched_info_def* common_sched_info
   This is a placeholder for the scheduler parameters common
   to all schedulers.  

Referenced by dump_insn_location().

int* containing_rgn
   The number of the region containing a block.  
state_t curr_state
   Pointer to data describing the current DFA state.  
   The following variable value refers for all current and future
   reservations of the processor units.  

Referenced by create_composed_state(), and NDFA_to_DFA().

int current_blocks

Referenced by debug_regions().

int current_nr_blocks
   Blocks of the current region being scheduled.  

Referenced by code_motion_process_successors(), and mark_regno_birth_or_death().

struct haifa_sched_info* current_sched_info
   Point to state used for the current scheduling pass.  

Referenced by debug_ebb_dependencies(), dump_insn_location(), sched_free_deps(), sd_add_dep(), and sd_copy_back_deps().

int cycle_issued_insns
   The following variable value is number of essential insns issued on
   the current cycle.  An insn is essential one if it changes the
   processors state.  

Referenced by ok_for_early_queue_removal().

int dfa_lookahead
   This holds the value of the target dfa_lookahead hook.  

Referenced by early_queue_to_ready(), and ok_for_early_queue_removal().

size_t dfa_state_size
   The following variable value is size of memory representing all
   current and future reservations of the processor units.  

Referenced by early_queue_to_ready(), get_ebb_head_tail(), haifa_sched_init(), max_issue(), ok_for_early_queue_removal(), and remove_insns_that_need_bookkeeping().

int* ebb_head
   The mapping from ebb to block.  
   ebb_head [i] - is index in rgn_bb_table of the head basic block of i'th ebb.
   Currently we can get a ebb only through splitting of currently
   scheduling block, therefore, we don't need ebb_head array for every region,
   hence, its sufficient to hold it for current one only.  

Referenced by rgn_setup_region().

   The data is specific to the Haifa scheduler.  
   Haifa Instruction Data.  
struct common_sched_info_def haifa_common_sched_info
   Haifa version of sched_info hooks common to all headers.  

Referenced by dump_insn_location().

bool haifa_recovery_bb_ever_added_p
   True, if recovery block was added during this scheduling pass.
   Used to determine if we should have empty memory pools of dependencies
   after finishing current region.  
int issue_rate

Instruction scheduling pass. Copyright (C) 1992-2013 Free Software Foundation, Inc. Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by, and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see http://www.gnu.org/licenses/.

   Instruction scheduling pass.  This file, along with sched-deps.c,
   contains the generic parts.  The actual entry point for the normal
   instruction scheduling pass is found in sched-rgn.c.

   We compute insn priorities based on data dependencies.  Flow
   analysis only creates a fraction of the data-dependencies we must
   observe: namely, only those dependencies which the combiner can be
   expected to use.  For this pass, we must therefore create the
   remaining dependencies we need to observe: register dependencies,
   memory dependencies, dependencies to keep function calls in order,
   and the dependence between a conditional branch and the setting of
   condition codes are all dealt with here.

   The scheduler first traverses the data flow graph, starting with
   the last instruction, and proceeding to the first, assigning values
   to insn_priority as it goes.  This sorts the instructions
   topologically by data dependence.

   Once priorities have been established, we order the insns using
   list scheduling.  This works as follows: starting with a list of
   all the ready insns, and sorted according to priority number, we
   schedule the insn from the end of the list by placing its
   predecessors in the list according to their priority order.  We
   consider this insn scheduled by setting the pointer to the "end" of
   the list to point to the previous insn.  When an insn has no
   predecessors, we either queue it until sufficient time has elapsed
   or add it to the ready list.  As the instructions are scheduled or
   when stalls are introduced, the queue advances and dumps insns into
   the ready list.  When all insns down to the lowest priority have
   been scheduled, the critical path of the basic block has been made
   as short as possible.  The remaining insns are then scheduled in
   remaining slots.

   The following list shows the order in which we want to break ties
   among insns in the ready list:

   1.  choose insn with the longest path to end of bb, ties
   broken by
   2.  choose insn with least contribution to register pressure,
   ties broken by
   3.  prefer in-block upon interblock motion, ties broken by
   4.  prefer useful upon speculative motion, ties broken by
   5.  choose insn with largest control flow probability, ties
   broken by
   6.  choose insn with the least dependences upon the previously
   scheduled insn, or finally
   7   choose the insn which has the most insns dependent on it.
   8.  choose insn with lowest UID.

   Memory references complicate matters.  Only if we can be certain
   that memory references are not part of the data dependency graph
   (via true, anti, or output dependence), can we move operations past
   memory references.  To first approximation, reads can be done
   independently, while writes introduce dependencies.  Better
   approximations will yield fewer dependencies.

   Before reload, an extended analysis of interblock data dependences
   is required for interblock scheduling.  This is performed in
   compute_block_backward_dependences ().

   Dependencies set up by memory references are treated in exactly the
   same way as other dependencies, by using insn backward dependences
   INSN_BACK_DEPS.  INSN_BACK_DEPS are translated into forward dependences
   INSN_FORW_DEPS for the purpose of forward list scheduling.

   Having optimized the critical path, we may have also unduly
   extended the lifetimes of some registers.  If an operation requires
   that constants be loaded into registers, it is certainly desirable
   to load those constants as early as necessary, but no earlier.
   I.e., it will not do to load up a bunch of registers at the
   beginning of a basic block only to use them at the end, if they
   could be loaded later, since this may result in excessive register
   utilization.

   Note that since branches are never in basic blocks, but only end
   basic blocks, this pass will not move branches.  But that is ok,
   since we can use GNU's delayed branch scheduling pass to take care
   of this case.

   Also note that no further optimizations based on algebraic
   identities are performed, so this pass would be a good one to
   perform instruction splitting, such as breaking up a multiply
   instruction into shifts and adds where that is profitable.

   Given the memory aliasing analysis that this pass should perform,
   it should be possible to remove redundant stores to memory, and to
   load values from registers instead of hitting memory.

   Before reload, speculative insns are moved only if a 'proof' exists
   that no exception will be caused by this, and if no live registers
   exist that inhibit the motion (live registers constraints are not
   represented by data dependence edges).

   This pass must update information that subsequent passes expect to
   be correct.  Namely: reg_n_refs, reg_n_sets, reg_n_deaths,
   reg_n_calls_crossed, and reg_live_length.  Also, BB_HEAD, BB_END.

   The information in the line number notes is carefully retained by
   this pass.  Notes that refer to the starting and ending of
   exception regions are also carefully retained by this pass.  All
   other NOTE insns are grouped in their same relative order at the
   beginning of basic blocks and regions that have been scheduled.  
   issue_rate is the number of insns that can be scheduled in the same
   machine cycle.  It can be defined in the config/mach/mach.h file,
   otherwise we set it to 1.  
rtx note_list
   This list holds ripped off notes from the current block.  These notes will
   be attached to the beginning of the block when its scheduling is finished.
   List of important notes we must keep around.  This is a pointer to the
   last element in the list.  
int nr_regions
   Number of regions in the procedure.  
struct ready_list ready
   The ready list.  

Referenced by av_set_could_be_blocked_by_bookkeeping_p(), and vinsn_vec_add().

char* ready_try
   The following array is used to find the best insn from ready when
   the automaton pipeline interface is used.  

Referenced by av_set_could_be_blocked_by_bookkeeping_p(), early_queue_to_ready(), insn_finishes_cycle_p(), and ok_for_early_queue_removal().

int* rgn_bb_table
   Array of lists of regions' blocks.  

Referenced by debug_regions(), and rgn_setup_region().

region* rgn_table
   Table of region descriptions.  
basic_block(* sched_create_empty_bb)(basic_block)
   Create empty basic block after the specified block.  
struct sched_deps_info_def* sched_deps_info
   Holds current parameters for the dependency analyzer.  

Referenced by dep_cost_1(), and dump_insn_location().

FILE* sched_dump
   Debugging file.  All printouts are sent to dump, which is always set,
   either to stderr, or to the dump listing file (-dRS).  

Referenced by code_motion_process_successors(), ds_get_max_dep_weak(), dump_ds(), dump_flist(), setup_ref_regs(), and switch_dump().

int sched_emulate_haifa_p
   True if during selective scheduling we need to emulate some of haifa
   scheduler behaviour.  
   True if/when we want to emulate Haifa scheduler in the common code.
   This is used in sched_rgn_local_init and in various places in
   sched-deps.c.
void(* sched_init_only_bb)(basic_block, basic_block)
vec<int> sched_luids
   Mapping from INSN_UID to INSN_LUID.  In the end all other per insn data
   structures should be indexed by luid.  
   Mapping from instruction UID to its Logical UID.  
int sched_max_luid
   The highest INSN_LUID.  
   Next LUID to assign to an instruction.  
bool sched_no_dce
   This can be set to true by a backend if the scheduler should not
   enable a DCE pass.  
enum sched_pressure_algorithm sched_pressure
   Do register pressure sensitive insn scheduling if the flag is set.  

Referenced by mark_insn_hard_regno_birth().

enum reg_class* sched_regno_pressure_class
   Map regno -> its pressure class.  The map defined only when
   SCHED_PRESSURE_P is true.  
   Map regno -> its pressure class.  The map defined only when
   SCHED_PRESSURE_P is true.  
basic_block(* sched_split_block)(basic_block, rtx)
   Split block function.  Different schedulers might use different functions
   to handle their internal data consistent.  
int sched_verbose
   sched-verbose controls the amount of debugging output the
   scheduler prints.  It is controlled by -fsched-verbose=N:
   N>0 and no -DSR : the output is directed to stderr.
   N>=10 will direct the printouts to stderr (regardless of -dSR).
   N=1: same as -dSR.
   N=2: bb's probabilities, detailed ready list info, unit/insn info.
   N=3: rtl at abort point, control-flow, regions info.
   N=5: dependences info.  

Referenced by advance_one_cycle(), code_motion_path_driver_cleanup(), code_motion_process_successors(), commit_schedule(), debug_ebb_dependencies(), ds_get_max_dep_weak(), dump_ds(), equal_after_moveup_path_p(), estimate_insn_tick(), free_rgn_deps(), get_ebb_head_tail(), maybe_emit_renaming_copy(), model_last_use_except(), model_promote_insn(), moveup_set_expr(), moving_insn_creates_bookkeeping_block_p(), need_nop_to_preserve_insn_bb(), rank_for_schedule(), remove_insn_from_stream(), sel_region_init(), and undo_transformations().

spec_info_t spec_info
   Description of the speculative part of the scheduling.
   If NULL - no speculation.  

Referenced by collect_unavailable_regs_from_bnds(), compute_live_below_insn(), and dep_cost_1().

int target_bb
   The bb being currently scheduled.  

Referenced by init_ready_list(), and is_prisky().