GCC Middle and Back End API Reference
sched-deps.c File Reference
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
Include dependency graph for sched-deps.c:

Data Structures

struct  mem_inc_info

Functions

enum reg_note ds_to_dk ()
ds_t dk_to_ds ()
void init_dep_1 ()
void init_dep ()
static void copy_dep ()
static void dump_ds (FILE *, ds_t)
static void dump_dep ()
void sd_debug_dep ()
static bool depl_on_debug_p ()
static void attach_dep_link ()
static void add_to_deps_list ()
static void detach_dep_link ()
static void remove_from_deps_list ()
static void move_dep_link ()
static bool dep_link_is_detached_p ()
static dep_node_t create_dep_node ()
static void delete_dep_node ()
static bool deps_list_empty_p ()
static deps_list_t create_deps_list ()
static void free_deps_list ()
bool deps_pools_are_empty_p ()
static void clear_deps_list ()
static bool dep_spec_p ()
static int deps_may_trap_p (const_rtx)
static void add_dependence_1 (rtx, rtx, enum reg_note)
static void add_dependence_list (rtx, rtx, int, enum reg_note, bool)
static void add_dependence_list_and_free (struct deps_desc *, rtx, rtx *, int, enum reg_note, bool)
static void delete_all_dependences (rtx)
static void chain_to_prev_insn (rtx)
static void flush_pending_lists (struct deps_desc *, rtx, int, int)
static void sched_analyze_1 (struct deps_desc *, rtx, rtx)
static void sched_analyze_2 (struct deps_desc *, rtx, rtx)
static void sched_analyze_insn (struct deps_desc *, rtx, rtx)
static bool sched_has_condition_p (const_rtx)
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool)
static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool, rtx, rtx)
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx)
static void check_dep (dep_t, bool)
static int deps_may_trap_p ()
static rtx sched_get_condition_with_rev_uncached ()
rtx sched_get_reverse_condition_uncached ()
static rtx sched_get_condition_with_rev ()
static bool sched_has_condition_p ()
static int conditions_mutex_p ()
bool sched_insns_conditions_mutex_p ()
bool sched_insn_is_legitimate_for_speculation_p ()
void sd_next_list (const_rtx insn, sd_list_types_def *types_ptr, deps_list_t *list_ptr, bool *resolved_p_ptr)
int sd_lists_size ()
bool sd_lists_empty_p ()
void sd_init_insn ()
void sd_finish_insn ()
static dep_t sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p, sd_iterator_def *sd_it_ptr)
dep_t sd_find_dep_between ()
static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 ()
static enum DEPS_ADJUST_RESULT ask_dependency_caches ()
static void set_dependency_caches ()
static void update_dependency_caches ()
static void change_spec_dep_to_hard ()
static enum DEPS_ADJUST_RESULT update_dep (dep_t dep, dep_t new_dep, sd_iterator_def sd_it, rtx mem1, rtx mem2)
static void get_back_and_forw_lists (dep_t dep, bool resolved_p, deps_list_t *back_list_ptr, deps_list_t *forw_list_ptr)
void sd_add_dep ()
enum DEPS_ADJUST_RESULT sd_add_or_update_dep ()
void sd_resolve_dep ()
void sd_unresolve_dep ()
void sd_copy_back_deps ()
void sd_delete_dep ()
static void dump_lists ()
void sd_debug_lists ()
void add_dependence ()
static int remove_from_dependence_list ()
static int remove_from_both_dependence_lists ()
static void delete_all_dependences ()
static void chain_to_prev_insn ()
static void add_insn_mem_dependence (struct deps_desc *deps, bool read_p, rtx insn, rtx mem)
static void haifa_start_insn ()
static void haifa_finish_insn ()
void haifa_note_reg_set ()
void haifa_note_reg_clobber ()
void haifa_note_reg_use ()
static void haifa_note_mem_dep ()
static void haifa_note_dep ()
static void note_reg_use ()
static void note_reg_set ()
static void note_reg_clobber ()
static void note_mem_dep ()
static void note_dep ()
enum reg_note ds_to_dt ()
static struct reg_use_data * create_insn_reg_use ()
static struct reg_set_data * create_insn_reg_set ()
static void setup_insn_reg_uses ()
static bool insn_use_p ()
static void mark_insn_pseudo_birth ()
static void mark_insn_hard_regno_birth (rtx insn, int regno, int nregs, bool clobber_p, bool unused_p)
static void mark_insn_reg_birth ()
static void mark_pseudo_death ()
static void mark_hard_regno_death ()
static void mark_reg_death ()
static void mark_insn_reg_store ()
static void mark_insn_reg_clobber ()
void init_insn_reg_pressure_info ()
static void extend_deps_reg_info ()
void maybe_extend_reg_info_p ()
static void sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode, enum rtx_code ref, rtx insn)
static void sched_analyze_1 ()
static void sched_analyze_2 ()
static void sched_analyze_insn ()
static bool call_may_noreturn_p ()
static bool chain_to_prev_insn_p ()
void deps_analyze_insn ()
void deps_start_bb ()
void sched_analyze ()
static void delete_dep_nodes_in_back_deps ()
void sched_free_deps ()
void init_deps ()
void init_deps_reg_last ()
void free_deps ()
void remove_from_deps ()
static void init_deps_data_vector ()
void sched_deps_init ()
void extend_dependency_caches ()
void sched_deps_finish ()
void init_deps_global ()
void finish_deps_global ()
dw_t estimate_dep_weak ()
static void add_dependence_1 ()
static dw_t get_dep_weak_1 ()
dw_t get_dep_weak ()
ds_t set_dep_weak ()
static ds_t ds_merge_1 ()
ds_t ds_merge ()
ds_t ds_full_merge ()
ds_t ds_max_merge ()
dw_t ds_weak ()
ds_t ds_get_speculation_types ()
ds_t ds_get_max_dep_weak ()
static void dump_ds ()
DEBUG_FUNCTION void debug_ds ()
static void check_dep ()
static rtx attempt_change ()
static bool parse_add_or_inc ()
static bool find_inc ()
static bool find_mem ()
void find_modifiable_mems ()

Variables

struct sched_deps_info_def * sched_deps_info
vec< haifa_deps_insn_data_def > h_d_i_d = vNULL
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON)
static alloc_pool dn_pool
static int dn_pool_diff = 0
static alloc_pool dl_pool
static int dl_pool_diff = 0
static regset reg_pending_sets
static regset reg_pending_clobbers
static regset reg_pending_uses
static regset reg_pending_control_uses
static enum reg_pending_barrier_mode reg_pending_barrier
static HARD_REG_SET implicit_reg_pending_clobbers
static HARD_REG_SET implicit_reg_pending_uses
static bitmap_head * true_dependency_cache = NULL
static bitmap_head * output_dependency_cache = NULL
static bitmap_head * anti_dependency_cache = NULL
static bitmap_head * control_dependency_cache = NULL
static bitmap_head * spec_dependency_cache = NULL
static int cache_size
static bool mark_as_hard
static rtx cur_insn = NULL_RTX
static struct reg_pressure_data reg_pressure_info [N_REG_CLASSES]
static bool can_start_lhs_rhs_p

Function Documentation

void add_dependence ( )
   A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  
     A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  
         Verify that the insn does not use a different value in
         the condition register than the one that was present at
         the jump.  

Referenced by discard_delay_pairs_above().

static void add_dependence_1 ( rtx  ,
rtx  ,
enum  reg_note 
)
static

Referenced by debug_ds().

static void add_dependence_1 ( )
static
   Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  
     When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  
static void add_dependence_list ( rtx  insn,
rtx  list,
int  uncond,
enum reg_note  dep_type,
bool  hard 
)
static
   A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  
static void add_dependence_list_and_free ( struct deps_desc *  deps,
rtx  insn,
rtx *  listp,
int  uncond,
enum reg_note  dep_type,
bool  hard 
)
static
   Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  
     We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  

References deps_desc::readonly.

static void add_insn_mem_dependence ( struct deps_desc *  deps,
bool  read_p,
rtx  insn,
rtx  mem 
)
static
   Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  
   Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  

Referenced by sched_analyze_2().
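
The four dependence kinds listed above can be illustrated with ordinary memory accesses.  The snippet below is only an illustrative sketch; the variables and the function are hypothetical and not part of GCC.

    /* Hypothetical example: successive accesses to the same location X
       exhibit each of the four memory dependence kinds.  */
    int x, a, b;

    void dependence_kinds (void)
    {
      a = x;   /* initial read                                    */
      b = x;   /* (0) read dependence:   read follows read        */
      x = 1;   /* (3) anti dependence:   write follows read       */
      x = 2;   /* (2) output dependence: write follows write      */
      a = x;   /* (1) true dependence:   read follows write       */
    }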

static enum DEPS_ADJUST_RESULT add_or_update_dep_1 ( dep_t  new_dep,
bool  resolved_p,
rtx  mem1,
rtx  mem2 
)
static
   Add or update  a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  
     Check that we don't already have this dependence.  
           We found an existing dependency between ELEM and INSN.  
           We didn't find a dep; it shouldn't be present in the cache.  
     Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  
static void add_to_deps_list ( )
static
   Add dep_link LINK to deps_list L.  
     Don't count debug deps.  

References depl_on_debug_p(), and detach_dep_link().

Referenced by detach_dep_link().

static enum DEPS_ADJUST_RESULT ask_dependency_caches ( )
static
   Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  
           There is no existing dep so it should be created.  
           DEP does not add anything to the existing dependence.  
           There is no existing dep so it should be created.  
               DEP does not add anything to the existing dependence.  
             Only true dependencies can be data speculative and
             only anti dependencies can be control speculative.  
             if (DEP is SPECULATIVE) then
             ..we should update DEP_STATUS
             else
             ..we should reset existing dep to non-speculative.  

References bitmap_set_bit().

Referenced by update_dep().
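
As a rough sketch of how a caller might act on the three answers described above, consider the dispatch below.  The enum definition and the helper are standalone stand-ins for illustration only; the real type and callers live in the scheduler sources.

    #include <stdio.h>

    /* Standalone stand-in for the result type; illustration only.  */
    enum DEPS_ADJUST_RESULT { DEP_CREATED, DEP_PRESENT, DEP_CHANGED };

    static void handle_cache_answer (enum DEPS_ADJUST_RESULT res)
    {
      switch (res)
        {
        case DEP_CREATED:   /* No dep in the caches: create a new one.      */
          puts ("create a new dependence");
          break;
        case DEP_PRESENT:   /* An equal dep already exists: nothing to do.  */
          puts ("dependence already present");
          break;
        case DEP_CHANGED:   /* A dep exists but must absorb new info.       */
          puts ("update the existing dependence");
          break;
        }
    }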

static void attach_dep_link ( )
static
   Functions to operate with a single link from the dependencies lists -
   dep_link_t.  
   Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  
     Init node being inserted.  
     Fix next node.  
     Fix prev node.  
static rtx attempt_change ( )
static
   Verify that the memory location described in MII can be replaced with
   one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
   insn remains unchanged by this function.  
     Jump through a lot of hoops to keep the attributes up to date.  We
     do not want to call one of the change address variants that take
     an offset even though we know the offset in many cases.  These
     assume you are changing where the address is pointing by the
     offset.  
     Put back the old one.  
static bool call_may_noreturn_p ( )
static
   Return TRUE if INSN might not always return normally (e.g. call exit,
   longjmp, loop forever, ...).  
   FIXME: Why can't this function just use flags_from_decl_or_type and
   test for ECF_NORETURN?  
     const or pure calls that aren't looping will always return.  
                   Assume certain string/memory builtins always return.  
     For all other calls assume that they might not always return.  
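
As an illustration of the calls this predicate is conservative about, the hypothetical function below may terminate the process, transfer control elsewhere, or loop forever; none of these is a normal return.  This is an example of the situations described above, not GCC code.

    #include <stdlib.h>
    #include <setjmp.h>

    extern jmp_buf env;    /* hypothetical, set up by a setjmp caller */

    void might_not_return (int k)
    {
      if (k == 0)
        exit (1);          /* terminates the process                 */
      if (k == 1)
        longjmp (env, 1);  /* returns to the setjmp site, not here   */
      for (;;)             /* loops forever                          */
        ;
    }
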
static void chain_to_prev_insn ( rtx  )
static
static void chain_to_prev_insn ( )
static
   All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards. Then we add the dependencies for the group to
   the previous nonnote insn.  
static bool chain_to_prev_insn_p ( )
static
   Return true if INSN should be made dependent on the previous instruction
   group, and if all INSN's dependencies should be moved to the first
   instruction of that group.  
     INSN forms a group with the previous instruction.  
     If the previous instruction clobbers a register R and this one sets
     part of R, the clobber was added specifically to help us track the
     liveness of R.  There's no point scheduling the clobber and leaving
     INSN behind, especially if we move the clobber to another block.  

References cselib_finish(), cselib_init(), CSELIB_RECORD_MEMORY, deps_analyze_insn(), deps_start_bb(), sd_init_insn(), and sched_deps_info_def::use_cselib.

static void change_spec_dep_to_hard ( )
static
   Convert a dependence pointed to by SD_IT to be non-speculative.  
       Clear the cache entry.  
static void check_dep ( dep_t  ,
bool   
)
static
static void check_dep ( )
static
   Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  
     Check that dependence type contains the same bits as the status.  
     HARD_DEP can not appear in dep_status of a link.  
     Check that dependence status is set correctly when speculation is not
     supported.  
             Check that dependence weakness is in proper range.  
             Only true dependence can be data speculative.  
             Control dependencies in the insn scheduler are represented by
             anti-dependencies, therefore only anti dependence can be
             control speculative.  
             Subsequent speculations should resolve true dependencies.  
         Check that true and anti dependencies can't have other speculative
         statuses.  
         An output dependence can't be speculative at all.  
static void clear_deps_list ( )
static
   Remove all elements from L.  
static int conditions_mutex_p ( const_rtx  ,
const_rtx  ,
bool  ,
bool   
)
static
static int conditions_mutex_p ( )
static
   Return nonzero if conditions COND1 and COND2 can never both be true.  

References may_trap_or_fault_p().
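
A minimal illustration of mutually exclusive conditions (hypothetical example, not GCC code): for any value of cc at most one of the two guarded statements executes, so the write and the read can never conflict and no dependence between them is required.

    void mutex_conditions (int cc, int *p)
    {
      int b = 0;
      if (cc == 0)
        *p = 1;   /* write, executed only under COND1           */
      if (cc != 0)
        b = *p;   /* read, executed only under COND2 == !COND1  */
      (void) b;
    }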

static void copy_dep ( )
static
   Make a copy of FROM in TO.  
static dep_node_t create_dep_node ( )
static
   Create a dep_node.  
static deps_list_t create_deps_list ( )
static
   Create a new deps_list.  
static struct reg_set_data* create_insn_reg_set ( )
static read
   Allocate and return reg_set_data structure for REGNO and INSN.  
static struct reg_use_data* create_insn_reg_use ( )
static read
   Functions for computation of info needed for register pressure
   sensitive insn scheduling.  
   Allocate and return reg_use_data structure for REGNO and INSN.  

Referenced by haifa_note_mem_dep().

DEBUG_FUNCTION void debug_ds ( )
static void delete_all_dependences ( rtx  )
static
static void delete_all_dependences ( )
static
   Clear all dependencies for an insn.  
     The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  
static void delete_dep_node ( )
static
   Delete dep_node N.  N must not be connected to any deps_list.  

Referenced by deps_analyze_insn(), and sd_add_dep().

static void delete_dep_nodes_in_back_deps ( )
static
   Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  
static bool dep_link_is_detached_p ( )
static
   Return true if LINK is not attached to any list.  
static bool dep_spec_p ( )
static
   Decide whether a dependency should be treated as a hard or a speculative
   dependency.  
static bool depl_on_debug_p ( )
inline static
   Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  

Referenced by add_to_deps_list().

void deps_analyze_insn ( )
   Analyze INSN with DEPS as a context.  
     Record the condition for this insn.  
         Make each JUMP_INSN (but not a speculative check)
         a scheduling barrier for memory references.  
             Keep the list a reasonable size.  
         For each insn which shouldn't cross a jump, add a dependence.  
             This is setjmp.  Assume that all registers, not just
             hard registers, may be clobbered by this call.  
               A call may read and modify global register variables.  
             Other call-clobbered hard regs may be clobbered.
             Since we only have a choice between 'might be clobbered'
             and 'definitely not clobbered', we must include all
             partly call-clobbered registers here.  
             We don't know what set of fixed registers might be used
             by the function, but it is certain that the stack pointer
             is among them, so be conservative.  
             The frame pointer is normally not used by the function
             itself, but by the debugger.  
             ??? MIPS o32 is an exception.  It uses the frame pointer
             in the macro expansion of jal but does not represent this
             fact in the call_insn rtl.  
         For each insn which shouldn't cross a call, add a dependence
         between that insn and this call insn.  
         If CALL would be in a sched group, then this will violate
         convention that sched group insns have dependencies only on the
         previous instruction.

         Of course one can say: "Hey!  What about head of the sched group?"
         And I will answer: "Basic principles (one dep per insn) are always
         the same."  
         In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies starting
         from here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  
             Remember the last function call for limiting lifetimes.  
                 Remember the last function call that might not always return
                 normally for limiting moves of trapping insns.  
             Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  
     Fixup the dependencies in the sched group.  

References delete_dep_node(), get_back_and_forw_lists(), _sd_iterator::linkp, remove_from_deps_list(), sd_iterator_cond(), and sd_iterator_start().

Referenced by chain_to_prev_insn_p().

static bool deps_list_empty_p ( )
static
   Functions to operate with dependences lists - deps_list_t.  
   Return true if list L is empty.  

Referenced by sd_next_list().

static int deps_may_trap_p ( const_rtx  )
static
static int deps_may_trap_p ( )
static
   Return nonzero if a load of the memory reference MEM can cause a trap.  
bool deps_pools_are_empty_p ( void  )
   Return true if there is no dep_nodes and deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to pool.  
void deps_start_bb ( )
   Initialize DEPS for the new block beginning with HEAD.  
     Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  

Referenced by chain_to_prev_insn_p().

static void detach_dep_link ( )
static
   Detach dep_link L from the list.  

References add_to_deps_list(), and remove_from_deps_list().

Referenced by add_to_deps_list().

ds_t dk_to_ds ( )
   Return equivalent dep_status.  
ds_t ds_full_merge ( )
   Return the join of two dep_statuses DS1 and DS2.  
           Then this dep can't be speculative.  
             Both are speculative.  Merging probabilities.  
ds_t ds_get_max_dep_weak ( )
   Return a dep status that contains maximal weakness for each speculation
   type present in DS.  

References mem_inc_info::inc_input, mem_inc_info::mem_insn, reg_overlap_mentioned_p(), sched_dump, and sched_verbose.

ds_t ds_get_speculation_types ( )
   Return a dep status that contains all speculation types of DS.  

References _sd_iterator::linkp, mem_inc_info::mem_insn, sd_iterator_cond(), and sd_iterator_start().

ds_t ds_max_merge ( )
   Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  

References mem_inc_info::inc_input, and mem_inc_info::inc_insn.

ds_t ds_merge ( )
   Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  

Referenced by sched_deps_finish(), and update_dependency_caches().

static ds_t ds_merge_1 ( )
static
   Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  
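
The two merge strategies can be sketched with plain probabilities.  The helper below is a stand-in that treats weaknesses as values in [0,1]; the real code operates on scaled integer dw_t weaknesses packed into a ds_t.

    /* Illustrative only: join two speculation-success probabilities
       either by taking the maximum or by multiplying them.  */
    static double merge_weakness (double w1, double w2, int max_p)
    {
      if (max_p)
        return w1 > w2 ? w1 : w2;   /* ds_max_merge-style join        */
      return w1 * w2;               /* ds_merge-style multiplication  */
    }
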
enum reg_note ds_to_dk ( )
   Return the major type present in the DS.  
enum reg_note ds_to_dt ( )
   Return the reg_note corresponding to DS.  
dw_t ds_weak ( )
   Return the probability of speculation success for the speculation
   status DS.  

Referenced by compute_live_below_insn().

static void dump_dep ( )
static
   Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  

Referenced by sd_unresolve_dep().

static void dump_ds ( FILE *  ,
ds_t   
)
static
static void dump_ds ( )
static
   Dump information about the dependence status S.  

References sched_dump, and sched_verbose.

static void dump_lists ( )
static
   Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  
dw_t estimate_dep_weak ( )
   Estimate the weakness of dependence between MEM1 and MEM2.  
       MEMs are the same - don't speculate.  
       Again, MEMs are the same.  
       Different addressing modes - a reason to be more speculative
       than usual.  
       We can't say anything about the dependence.  

Referenced by sched_deps_finish(), and update_dependency_caches().
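
The heuristic described above can be sketched as follows.  The weakness constants and the address comparison are hypothetical stand-ins; the real function inspects the rtx addresses of the two MEMs.

    /* Hypothetical weakness scale: larger means a weaker dependence,
       i.e. speculation is more likely to succeed.  */
    enum { WEAK_NEVER = 0, WEAK_UNCERTAIN = 2, WEAK_LIKELY = 3 };

    static int
    estimate_weakness_sketch (const void *addr1, int code1,
                              const void *addr2, int code2)
    {
      if (addr1 == addr2)
        return WEAK_NEVER;       /* MEMs are the same - don't speculate  */
      if (code1 != code2)
        return WEAK_LIKELY;      /* different addressing modes - weaker  */
      return WEAK_UNCERTAIN;     /* can't say anything about the dep     */
    }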

void extend_dependency_caches ( )
   Create or extend (depending on CREATE_P) dependency caches to
   size N.  
static void extend_deps_reg_info ( )
static
   Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  
     In a readonly context, it would not hurt to extend info,
     but it should not be needed.  

Referenced by mark_reg_death().

static bool find_inc ( )
static
   Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.  
             Need to assure that none of the operands of the inc
             instruction are assigned to by the mem insn.  
static bool find_mem ( )
static
   A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  
             Make sure this reg appears only once in this insn.  Can't use
             count_occurrences since that only works for pseudos.  
         If REG occurs inside a MEM used in a bit-field reference,
         that is unacceptable.  
     Time for some deep diving.  
void find_modifiable_mems ( )
   Examine the instructions between HEAD and TAIL and try to find
   dependencies that can be broken by modifying one of the patterns.  
void finish_deps_global ( void  )
   Free everything used by the dependency analysis code.  
static void flush_pending_lists ( struct deps_desc *  deps,
rtx  insn,
int  for_read,
int  for_write 
)
static
   Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  
void free_deps ( )
   Free insn lists found in DEPS.  
     We set max_reg to 0 when this context was already freed.  
     Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  
     As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  
static void free_deps_list ( )
static
   Free deps_list L.  
static void get_back_and_forw_lists ( dep_t  dep,
bool  resolved_p,
deps_list_t *  back_list_ptr,
deps_list_t *  forw_list_ptr 
)
static
   Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  

Referenced by deps_analyze_insn(), and sd_add_dep().

dw_t get_dep_weak ( )
   Return weakness of speculative type TYPE in the dep_status DS.  

Referenced by init_deps_global().

static dw_t get_dep_weak_1 ( )
static
   Return weakness of speculative type TYPE in the dep_status DS,
   without checking to prevent ICEs on malformed input.  
static void haifa_finish_insn ( )
static

Referenced by init_deps_reg_last().

static void haifa_note_dep ( )
static

Referenced by init_deps_reg_last().

static void haifa_note_mem_dep ( )
static
void haifa_note_reg_clobber ( )

Referenced by init_deps_reg_last().

void haifa_note_reg_set ( )

Referenced by init_deps_reg_last().

void haifa_note_reg_use ( )

Referenced by init_deps_reg_last().

static void haifa_start_insn ( )
static
   Implement hooks for haifa scheduler.  

Referenced by init_deps_reg_last().

void init_dep ( )
   Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  

Referenced by try_ready().

void init_dep_1 ( )
   Functions to operate with dependence information container - dep_t.  
   Init DEP with the arguments.  
void init_deps ( )
   Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate reg_last array
   of struct deps_desc immediately.  

Referenced by add_inter_loop_mem_dep().

static void init_deps_data_vector ( )
static
   Init deps data vector.  
void init_deps_global ( void  )
   Initialize some global variables needed by the dependency analysis
   code.  

References get_dep_weak().

Referenced by add_inter_loop_mem_dep(), and code_motion_process_successors().

void init_insn_reg_pressure_info ( )
   Set up reg pressure info related to INSN.  
static bool insn_use_p ( )
static
   Return TRUE if INSN has the use structure for REGNO.  
static void mark_hard_regno_death ( )
static
   Like mark_pseudo_death, except that NREGS says how many hard
   registers are involved in the death.  

Referenced by mark_insn_pseudo_birth().

static void mark_insn_hard_regno_birth ( rtx  insn,
int  regno,
int  nregs,
bool  clobber_p,
bool  unused_p 
)
static
static void mark_insn_pseudo_birth ( )
static
   Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say correspondingly that
   the register is in clobber or unused after the insn.  

References mark_hard_regno_death(), mark_pseudo_death(), and reg_use_data::regno.

static void mark_insn_reg_birth ( )
static
   Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
   correspondingly that the register is in clobber or unused after the
   insn.  
static void mark_insn_reg_clobber ( )
static
   Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  

References note_reg_clobber().

Referenced by mark_insn_hard_regno_birth().

static void mark_insn_reg_store ( )
static
   Process SETTER of REG.  DATA is an insn containing the setter.  

References note_reg_set().

Referenced by mark_insn_hard_regno_birth().

static void mark_pseudo_death ( )
static
   Update the register pressure info after death of pseudo register
   REGNO.  

Referenced by mark_insn_pseudo_birth().

static void mark_reg_death ( )
static
   Update the register pressure info after death of pseudo or hard
   register REG.  

References extend_deps_reg_info(), deps_desc::max_reg, max_reg_num(), maybe_extend_reg_info_p(), reload_completed, and sel_sched_p().

Referenced by mark_insn_hard_regno_birth().

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 ( dep_t  ,
bool  ,
rtx  ,
rtx   
)
static
static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 ( )
static
   Add or update  a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks whether producer and consumer are the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  
     Don't depend an insn on itself.  
           INSN has an internal dependence, which we can't overcome.  

References DEP_PRESENT.

void maybe_extend_reg_info_p ( void  )
   Extends REG_INFO_P if needed.  
     Extend REG_INFO_P, if needed.  

Referenced by mark_reg_death(), and undo_transformations().

static void move_dep_link ( )
static
   Move link LINK from list FROM to list TO.  

Referenced by set_dependency_caches().

static void note_dep ( )
static
static void note_mem_dep ( )
static
static void note_reg_clobber ( )
static

Referenced by mark_insn_reg_clobber().

static void note_reg_set ( )
static

Referenced by mark_insn_reg_store().

static void note_reg_use ( )
static
static bool parse_add_or_inc ( )
static
   Return true if INSN is of a form "a = b op c" where a and b are
   regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   information in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  
     Result must be single reg.  
         Note that the sign has already been reversed for !before_mem.  
static int remove_from_both_dependence_lists ( )
static
   Same as above, but process two lists at once.  
static int remove_from_dependence_list ( )
static
   Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  

References deps_desc::pending_read_insns, deps_desc::pending_read_list_length, and deps_desc::pending_read_mems.

void remove_from_deps ( )
   Remove INSN from dependence contexts DEPS.  
static void remove_from_deps_list ( )
static
   Remove link LINK from list LIST.  
     Don't count debug deps.  

References pool_alloc().

Referenced by deps_analyze_insn(), detach_dep_link(), and sd_add_dep().

void sched_analyze ( )
   Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  
             And initialize deps_lists.  

Referenced by add_inter_loop_mem_dep().

static void sched_analyze_1 ( struct deps_desc * ,
rtx  ,
rtx   
)
static
static void sched_analyze_1 ( )
static
   Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  
             These both read and modify the result.  We must handle
             them as writes to get proper dependencies for following
             instructions.  We must handle them as reads to get proper
             dependencies from this to previous instructions.
             Thus we need to call sched_analyze_2.  
             The second and third arguments are values read by this insn.  
         Treat all writes to a stack register as modifying the TOS.  
             Avoid analyzing the same register twice.  
         Writing memory.  
         Pending lists can't get larger with a readonly context.  
             Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  
     Analyze reads.  

References sched_analyze_2().

static void sched_analyze_2 ( struct deps_desc * ,
rtx  ,
rtx   
)
static

Referenced by sched_analyze_1().

static void sched_analyze_2 ( )
static
   Analyze the uses of memory and registers in rtx X in INSN.  
         Ignore constants.  
         User of CC0 depends on immediately preceding insn.  
          Don't move CC0 setter to another block (it can set up the
        same flag for previous CC0 users which is safe).  
         Treat all reads of a stack register as modifying the TOS.  
             Avoid analyzing the same register twice.  
           Reading memory.  
           Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  
       Force pending stores to memory in case a trap handler needs them.  
         A prefetch insn contains addresses only.  So if the prefetch
         address has no registers, there will be no dependencies on
         the prefetch insn.  This is wrong from a code correctness
         point of view, because such a prefetch can be moved below a
         jump insn, which usually generates a MOVE_BARRIER preventing
         insns containing registers or memories from being moved
         through the barrier.  It is also wrong from a generated-code
         performance point of view, because a prefetch without
         dependencies will tend to be issued later instead of
         earlier.  It is hard to generate accurate dependencies for
         prefetch insns, as a prefetch has only the start address,
         but it is better to have something than nothing.  
         FALLTHRU 
           Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  
           For all ASM_OPERANDS, we must traverse the vector of input operands.
           We can not just fall through here since then we would be confused
           by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
           traditional asms unlike their normal usage.  
         These both read and modify the result.  We must handle them as writes
         to get proper dependencies for following instructions.  We must handle
         them as reads to get proper dependencies from this to previous
         instructions.  Thus we need to pass them to both sched_analyze_1
         and sched_analyze_2.  We must call sched_analyze_2 first in order
         to get the proper antecedent for the read.  
         op0 = op0 + op1 
     Other cases: walk the insn.  

References add_insn_mem_dependence(), cselib_lookup_from_insn(), gen_rtx_MEM(), and sched_deps_info_def::use_cselib.

static void sched_analyze_insn ( struct deps_desc * ,
rtx  ,
rtx   
)
static
static void sched_analyze_insn ( )
static
   Analyze an INSN with pattern X to find all dependencies.  
       Avoid moving trapping instructions across function calls that might
       not always return.  
     We must avoid creating a situation in which two successors of the
     current block have different unwind info after scheduling.  If at any
     point the two paths re-join this leads to incorrect unwind info.  
     ??? There are certain situations involving a forced frame pointer in
     which, with extra effort, we could fix up the unwind info at a later
     CFG join.  However, it seems better to notice these cases earlier
     during prologue generation and avoid marking the frame pointer setup
     as frame-related at all.  
         Make sure prologue insn is scheduled before next jump.  
         Make sure epilogue insn is scheduled after preceding jumps.  
         ??? Should be recording conditions so we reduce the number of
         false dependencies.  
         Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  
     Mark registers CLOBBERED or used by called function.  
         Don't schedule anything after a tail call; a tail call needs
         to use at least all call-saved registers.  
                 Make latency of jump equal to 0 by using anti-dependence.  
             All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump iff
             the result is needed by the above register used mask.  
     If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  Same for
     non-jump instructions that define block boundaries.
     ??? Unclear whether this is still necessary in EBB mode.  If not,
     add_branch_dependences should be adjusted for RGN mode instead.  
     Add register dependencies for insn.  
             There's no point in making REG_DEP_CONTROL dependencies for
             debug insns.  
         Quite often, a debug insn will refer to stuff in the
         previous instruction, but the reason we want this
         dependency here is to make sure the scheduler doesn't
         gratuitously move a debug insn ahead.  This could dirty
         DF flags and cause additional analysis that wouldn't have
         occurred in compilation without debug insns, and such
         additional analysis can modify the generated code.  
         If the current insn is conditional, we can't free any
         of the lists.  
         Set up the pending barrier found.  
     Add dependencies if a scheduling barrier was found.  
         In the case of a barrier, most of the added dependencies are
         not real, so we use anti-dependences here.  
         Don't flush pending lists on speculative checks for
         selective scheduling.  
     If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on targets
     with small register classes, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  
               We don't want to mark debug insns as part of the same
               sched group.  We know they really aren't, but if we use
               debug insns to tell that a call group is over, we'll
               get different code if debug insns are not there and
               instructions that follow seem like they should be part
               of the call group.

               Also, if we did, chain_to_prev_insn would move the
               deps of the debug insn to the call insn, modifying
               non-debug post-dependency counts of the debug insn
               dependencies and otherwise messing with the scheduling
               order.

               Instead, let such debug insns be scheduled freely, but
               keep the call group open in case there are insns that
               should be part of it afterwards.  Since we grant debug
               insns higher priority than even sched group insns, it
               will all turn out all right.  
       INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  
static void sched_analyze_reg ( struct deps_desc *  deps,
int  regno,
enum machine_mode  mode,
enum rtx_code  ref,
rtx  insn 
)
static
   Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  
     We could emit new pseudos in renaming.  Extend the reg structures.  
     A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  
     ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  
         Pseudos that are REG_EQUIV to something may be replaced
         by that during reloading.  We need only add dependencies for
        the address in the REG_EQUIV note.  
         Don't let it cross a call after scheduling if it doesn't
         already cross one.  

References add_to_hard_reg_set().

void sched_deps_finish ( void  )
   Finalize dependency information for the whole function.  

References ds_merge(), estimate_dep_weak(), and set_dep_weak().

void sched_deps_init ( )
   If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  
     Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  
     We use another caching mechanism for selective scheduling, so
     we don't use this one.  
         ?!? We could save some memory by computing a per-region luid mapping
         which could reduce both the number of vectors in the cache and the
         size of each vector.  Instead we just avoid the cache entirely unless
         the average number of instructions in a basic block is very high.  See
         the comment before the declaration of true_dependency_cache for
         what we consider "very high".  
     Allocate lists for one block at a time.  
     Allocate nodes for one block at a time.  We assume that an average
     insn has 5 producers.  

References BE_IN_CONTROL_BITS_OFFSET, BE_IN_DATA_BITS_OFFSET, BEGIN_CONTROL_BITS_OFFSET, and BEGIN_DATA_BITS_OFFSET.

Referenced by code_motion_process_successors().

void sched_free_deps ( )
   Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  
     We make two passes since some insns may be scheduled before their
     dependencies are resolved.  
           Clear forward deps and leave the dep_nodes to the
           corresponding back_deps list.  
           Clear resolved back deps together with its dep_nodes.  

References current_sched_info, DO_SPECULATION, and haifa_sched_info::flags.

static rtx sched_get_condition_with_rev ( )
static
   Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  

Referenced by sched_get_reverse_condition_uncached().

static rtx sched_get_condition_with_rev_uncached ( )
static
   Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  
rtx sched_get_reverse_condition_uncached ( )
   Return the condition under which INSN does not execute (i.e.  the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  

References sched_get_condition_with_rev().

Referenced by sd_copy_back_deps().

static bool sched_has_condition_p ( const_rtx  )
static
static bool sched_has_condition_p ( )
static
   True when we can find a condition under which INSN is executed.  
bool sched_insn_is_legitimate_for_speculation_p ( )
   Return true if INSN can potentially be speculated with type DS.  
       The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  
           If instruction might fault, it cannot be speculatively scheduled.
           For control speculation it's obvious why and for data speculation
           it's because the insn might get wrong input if speculation
           wasn't successful.  
           If this is a predicated instruction, then it cannot be
           speculatively scheduled.  See PR35659.  

Referenced by sched_finish().

bool sched_insns_conditions_mutex_p ( )
   Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  
     df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  
             Make sure first instruction doesn't affect condition of second
             instruction if switched.  
             Make sure second instruction doesn't affect condition of first
             instruction if switched.  
void sd_add_dep ( )
   Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  
     If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  

References bitmap_clear_bit(), current_sched_info, delete_dep_node(), DO_SPECULATION, haifa_sched_info::flags, get_back_and_forw_lists(), _sd_iterator::linkp, remove_from_deps_list(), and _sd_iterator::resolved_p.

Referenced by try_ready().

enum DEPS_ADJUST_RESULT sd_add_or_update_dep ( )
   Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  
void sd_copy_back_deps ( )
   Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  

References current_sched_info, DO_PREDICATION, haifa_sched_info::flags, real_insn_for_shadow(), and sched_get_reverse_condition_uncached().

void sd_debug_dep ( )
   Dump all fields of DEP to STDERR.  
void sd_debug_lists ( )
   Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  

References remove_free_EXPR_LIST_node(), and remove_free_INSN_LIST_node().

void sd_delete_dep ( )
   Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  

References note_uses(), and record_hard_reg_uses().

dep_t sd_find_dep_between ( )
   Find a dependency between producer PRO and consumer CON.
   Use dependency [if available] to check if dependency is present at all.
   Search through resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none is found.  
       Avoiding the list walk below can cut compile times dramatically
       for some code.  

Referenced by resolve_dependencies().

static dep_t sd_find_dep_between_no_cache ( rtx  pro,
rtx  con,
bool  resolved_p,
sd_iterator_def *  sd_it_ptr 
)
static
   Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  
     Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  
         Find the dep_link with producer PRO in consumer's back_deps.  
         Find the dep_link with consumer CON in producer's forw_deps.  
void sd_finish_insn ( )
   Free data for INSN.  
     ??? It would be nice to deallocate dependency caches here.  
void sd_init_insn ( )
   Initialize data for INSN.  
     ??? It would be nice to allocate dependency caches here.  

Referenced by chain_to_prev_insn_p().

bool sd_lists_empty_p ( )
   Return true if INSN's lists defined by LIST_TYPES are all empty.  

Referenced by dying_use_p(), and set_spec_fed().

int sd_lists_size ( )
   Return the summary size of INSN's lists defined by LIST_TYPES.  

Referenced by sd_resolve_dep().

void sd_next_list ( const_rtx  insn,
sd_list_types_def *  types_ptr,
deps_list_t *  list_ptr,
bool *  resolved_p_ptr 
)
   Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  

References deps_list_empty_p(), and sd_next_list().

Referenced by sd_next_list().

void sd_resolve_dep ( )
   Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  

References sd_lists_size().

void sd_unresolve_dep ( )
   Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  

References dump_dep().

ds_t set_dep_weak ( )
   Return a dep_status that has the same parameters as DS, except that
   speculative type TYPE will have weakness DW.  

Referenced by sched_deps_finish(), and update_dependency_caches().

static void set_dependency_caches ( )
static
   Set dependency caches according to DEP.  

References bitmap_clear_bit(), _sd_iterator::linkp, and move_dep_link().

static void setup_insn_reg_uses ( )
static
   Set up insn register uses for INSN and dependency context DEPS.  
           Ignore use which is not dying.  
         Create the cycle list of uses.  
static enum DEPS_ADJUST_RESULT update_dep ( dep_t  dep,
dep_t  new_dep,
sd_iterator_def  sd_it,
rtx  mem1,
rtx  mem2 
)
static
   Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.  
     If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  
       Update DEP_STATUS.  
             Either existing dep or a dep we're adding or both are
             speculative.  
               The new dep can't be speculative.  
                 Both are speculative.  Merge probabilities.  
       The old dep was speculative, but now it isn't.  

References ask_dependency_caches(), DEP_CHANGED, DEP_CREATED, and DEP_PRESENT.

static void update_dependency_caches ( )
static
   The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  
     Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if we use_deps_list.  

References ds_merge(), estimate_dep_weak(), and set_dep_weak().


Variable Documentation

bitmap_head* anti_dependency_cache = NULL
static
int cache_size
static
bool can_start_lhs_rhs_p
static
   Internal variable for sched_analyze_[12] () functions.
   If it is nonzero, this means that sched_analyze_[12] looks
   at the most toplevel SET.  
bitmap_head* control_dependency_cache = NULL
static
rtx cur_insn = NULL_RTX
static
   Instruction which dependencies we are analyzing.  

Referenced by has_preds_in_current_region_p(), and moveup_set_expr().

alloc_pool dl_pool
static
   Pool to hold dependencies lists (deps_list_t).  
int dl_pool_diff = 0
static
   Number of deps_lists out there.  
alloc_pool dn_pool
static
   Pool to hold all dependency nodes (dep_node_t).  
int dn_pool_diff = 0
static
   Number of dep_nodes out there.  
int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON)
static
   Default flags for dump_dep ().  
   The data is specific to the Haifa scheduler.  
HARD_REG_SET implicit_reg_pending_clobbers
static
   Hard registers implicitly clobbered or used (or that may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn's constraint may allow only one register class.  Even if there is
   currently no hard register in the insn, that particular hard register
   will appear in the insn after the reload pass because the constraint
   requires it.  
HARD_REG_SET implicit_reg_pending_uses
static
bool mark_as_hard
static
   True if we should mark added dependencies as non-register deps.  
bitmap_head* output_dependency_cache = NULL
static
enum reg_pending_barrier_mode reg_pending_barrier
static
regset reg_pending_clobbers
static
regset reg_pending_control_uses
static
regset reg_pending_sets
static
regset reg_pending_uses
static
struct reg_pressure_data reg_pressure_info[N_REG_CLASSES]
static
   Register pressure info for the currently processed insn.  

Referenced by mark_insn_hard_regno_birth(), and note_mem_dep().

struct sched_deps_info_def* sched_deps_info
   Holds current parameters for the dependency analyzer.  

Referenced by dep_cost_1(), and dump_insn_location().

bitmap_head* spec_dependency_cache = NULL
static
bitmap_head* true_dependency_cache = NULL
static
   To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap for the true-dependency cache is
   allocated, then the remaining cache bitmaps are allocated as well.
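
As a sketch of the caching idea, each producer insn can be given a bitmap with one bit per potential consumer, so the duplicate-dependence test becomes a constant-time bit lookup instead of a list walk.  The fixed-size arrays and LUID bound below are hypothetical stand-ins, not the scheduler's actual data structure.

    #include <limits.h>

    #define MAX_LUID 1024   /* hypothetical upper bound on insn LUIDs */

    /* One bit per (producer, consumer) pair.  */
    static unsigned char dep_cache[MAX_LUID][MAX_LUID / CHAR_BIT + 1];

    static void record_dep (int pro_luid, int con_luid)
    {
      dep_cache[pro_luid][con_luid / CHAR_BIT]
        |= (unsigned char) (1u << (con_luid % CHAR_BIT));
    }

    static int dep_cached_p (int pro_luid, int con_luid)
    {
      return (dep_cache[pro_luid][con_luid / CHAR_BIT]
              >> (con_luid % CHAR_BIT)) & 1;
    }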