GCC Middle and Back End API Reference
Data Structures
struct _stmt_info_for_cost
struct _slp_tree
struct _slp_instance
struct _slp_oprnd_info
struct _vect_peel_info
struct _vect_peel_extended_info
struct peel_info_hasher
struct _loop_vec_info
struct _bb_vec_info
struct _stmt_vec_info
Typedefs
typedef source_location LOC
typedef struct _stmt_info_for_cost stmt_info_for_cost
typedef vec< stmt_info_for_cost > stmt_vector_for_cost
typedef struct _slp_tree *slp_tree
typedef struct _slp_instance *slp_instance
typedef struct _slp_oprnd_info *slp_oprnd_info
typedef struct _vect_peel_info *vect_peel_info
typedef struct _vect_peel_extended_info *vect_peel_extended_info
typedef struct _loop_vec_info *loop_vec_info
typedef struct _bb_vec_info *bb_vec_info
typedef struct data_reference *dr_p
typedef struct _stmt_vec_info *stmt_vec_info
typedef void *vec_void_p
typedef gimple (*vect_recog_func_ptr) (vec< gimple > *, tree *, tree *)
Variables
vec< vec_void_p > stmt_vec_info_vec
LOC vect_location
unsigned int current_vector_size
typedef struct _bb_vec_info * bb_vec_info |
typedef struct data_reference* dr_p |
typedef source_location LOC |
typedef struct _loop_vec_info * loop_vec_info |
Info on vectorized loops.
typedef struct _slp_instance * slp_instance |
SLP instance is a sequence of stmts in a loop that can be packed into SIMD stmts.
typedef struct _slp_oprnd_info * slp_oprnd_info |
This structure is used in creation of an SLP tree. Each instance corresponds to the same operand in a group of scalar stmts in an SLP node.
typedef struct _stmt_info_for_cost stmt_info_for_cost |
Structure to encapsulate information about a group of like instructions to be presented to the target cost model.
typedef struct _stmt_vec_info * stmt_vec_info |
typedef vec<stmt_info_for_cost> stmt_vector_for_cost |
typedef void* vec_void_p |
Avoid GTY(()) on stmt_vec_info.
typedef struct _vect_peel_extended_info * vect_peel_extended_info |
typedef struct _vect_peel_info * vect_peel_info |
typedef gimple(* vect_recog_func_ptr )(vec< gimple > *, tree *, tree *)
In tree-vect-patterns.c.
Pattern recognition functions. Additional pattern recognition functions can (and will) be added in the future.
enum dr_alignment_support
enum operation_type
enum slp_vect_type
The type of vectorization that can be applied to the stmt: regular loop-based vectorization; pure SLP - the stmt is a part of SLP instances and does not have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is a part of an SLP instance and also must be loop-based vectorized, since it has uses outside SLP sequences.

In the loop context the meanings of pure and hybrid SLP are slightly different. By saying that pure SLP is applied to the loop, we mean that we exploit only intra-iteration parallelism in the loop; i.e., the loop can be vectorized without doing any conceptual unrolling, because we don't pack together stmts from different iterations, only within a single iteration. Loop hybrid SLP means that we exploit both intra-iteration and inter-iteration parallelism (e.g., the number of elements in the vector is 4 and the slp-group-size is 2, in which case we don't have enough parallelism within an iteration, so we obtain the rest of the parallelism from subsequent iterations by unrolling the loop by 2).
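A minimal C sketch of the loop-hybrid case (a hypothetical example with illustrative names, not taken from the GCC sources): assuming 4-element float vectors, the two stores below form an SLP group of size 2, so a single iteration supplies only half a vector and the loop is conceptually unrolled by 2.

/* Hybrid SLP: group size 2, vector size 4 (assumed), so the
   vectorizer packs stmts from two consecutive iterations.  */
void
hybrid_slp_example (float *restrict a, const float *restrict b, int n)
{
  for (int i = 0; i < n; i++)
    {
      a[2 * i]     = b[2 * i]     + 1.0f;
      a[2 * i + 1] = b[2 * i + 1] + 2.0f;
    }
}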
enum stmt_vec_info_type
Info on vectorized defs.
enum vect_def_type
enum vect_relevant
enum vect_var_kind
add_stmt_cost () [inline, static]
Alias targetm.vectorize.add_stmt_cost.
References targetm.
Referenced by record_stmt_cost(), vect_analyze_slp_cost(), vect_bb_vectorization_profitable_p(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_model_induction_cost(), vect_model_promotion_demotion_cost(), vect_model_reduction_cost(), and vect_update_slp_costs_according_to_vf().
add_stmt_info_to_vec () [inline, static]
References count, _stmt_info_for_cost::count, _stmt_info_for_cost::kind, _stmt_info_for_cost::misalign, si, and _stmt_info_for_cost::stmt.
Referenced by record_stmt_cost().
aligned_access_p () [inline, static]
Return TRUE if the data access is aligned, and FALSE otherwise.
Referenced by vect_enhance_data_refs_alignment(), vect_supportable_dr_alignment(), vector_alignment_reachable_p(), vectorizable_load(), and vectorizable_store().
builtin_vectorization_cost () [inline, static]
Alias targetm.vectorize.builtin_vectorization_cost.
References targetm.
Referenced by record_stmt_cost(), and vect_get_stmt_cost().
tree bump_vector_ptr (tree dataref_ptr, gimple ptr_incr, gimple_stmt_iterator *gsi, gimple stmt, tree bump)
Function bump_vector_ptr

Increment a pointer (to a vector type) by vector-size. If requested, i.e. if PTR_INCR is given, then also connect the new increment stmt to the existing def-use update-chain of the pointer, by modifying PTR_INCR as illustrated below.

The pointer def-use update-chain before this function:
  DATAREF_PTR = phi (p_0, p_2)
  ....
  PTR_INCR: p_2 = DATAREF_PTR + step

The pointer def-use update-chain after this function:
  DATAREF_PTR = phi (p_0, p_2)
  ....
  NEW_DATAREF_PTR = DATAREF_PTR + BUMP
  ....
  PTR_INCR: p_2 = NEW_DATAREF_PTR + step

Input:
DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated in the loop.
PTR_INCR - optional. The stmt that updates the pointer in each iteration of the loop. The increment amount across iterations is expected to be vector_size.
BSI - location where the new update stmt is to be placed.
STMT - the original scalar memory-access stmt that is being vectorized.
BUMP - optional. The offset by which to bump the pointer. If not given, the offset is assumed to be vector_size.

Output: Return NEW_DATAREF_PTR as illustrated above.
References copy_ssa_name(), DR_PTR_INFO, duplicate_ssa_name_ptr_info(), gimple_build_assign_with_ops(), mark_ptr_info_alignment_unknown(), tree_int_cst_compare(), vect_finish_stmt_generation(), and vinfo_for_stmt().
Referenced by vectorizable_load(), and vectorizable_store().
destroy_cost_data () [inline, static]
Alias targetm.vectorize.destroy_cost_data.
References targetm.
Referenced by destroy_bb_vec_info(), and destroy_loop_vec_info().
void destroy_loop_vec_info (loop_vec_info, bool)
In tree-vect-loop.c.
FORNOW: Used in tree-parloops.c.
LOC find_bb_location (basic_block)
finish_cost () [inline, static]
Alias targetm.vectorize.finish_cost.
References targetm.
Referenced by vect_bb_vectorization_profitable_p(), and vect_estimate_min_profitable_iters().
void free_stmt_vec_info (gimple stmt)
void free_stmt_vec_info_vec (void)
Free hash table for stmt_vec_info.
References free_stmt_vec_info(), and stmt_vec_info_vec.
Referenced by execute_vect_slp(), parallelize_loops(), and vectorize_loops().
get_earlier_stmt () [inline, static]
Return the earlier statement between STMT1 and STMT2.
References gimple_uid().
Referenced by vect_analyze_data_ref_dependence(), vect_find_first_load_in_slp_instance(), and vect_slp_analyze_data_ref_dependence().
Function get_initial_def_for_reduction

Input:
STMT - a stmt that performs a reduction operation in the loop.
INIT_VAL - the initial value of the reduction variable

Output:
ADJUSTMENT_DEF - a tree that holds a value to be added to the final result of the reduction (used for adjusting the epilog - see below).

Return a vector variable, initialized according to the operation that STMT performs. This vector will be used as the initial value of the vector of partial results.

Option1 (adjust in epilog): Initialize the vector as follows:
  add/bit or/xor:    [0,0,...,0,0]
  mult/bit and:      [1,1,...,1,1]
  min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
and when necessary (e.g. add/mult case) let the caller know that it needs to adjust the result by init_val.

Option2: Initialize the vector as follows:
  add/bit or/xor:    [init_val,0,0,...,0]
  mult/bit and:      [init_val,1,1,...,1]
  min/max/cond_expr: [init_val,init_val,...,init_val]
and no adjustments are needed.

For example, for the following code:
  s = init_val;
  for (i=0;i<n;i++)
    s = s + a[i];

STMT is 's = s + a[i]', and the reduction variable is 's'. For a vector of 4 units, we want to return either [0,0,0,init_val], or [0,0,0,0] and let the caller know that it needs to adjust the result at the end by 'init_val'.

FORNOW, we are using the 'adjust in epilog' scheme, because this way the initialization vector is simpler (same element in all entries), if ADJUSTMENT_DEF is not NULL, and Option2 otherwise. A cost model should help decide between these two schemes.
References build_constructor(), build_int_cst(), build_real(), build_vector_from_val(), dconst0, dconst1, flow_bb_inside_loop_p(), get_vectype_for_scalar_type(), gimple_assign_rhs_code(), nested_in_vect_loop_p(), vec_alloc(), vect_create_destination_var(), vect_double_reduction_def, vect_get_vec_def_for_operand(), and vinfo_for_stmt().
Referenced by vect_create_epilog_for_reduction(), and vect_get_vec_def_for_operand().
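A rough, self-contained C illustration of the two initialization schemes (hypothetical example in plain scalar C, not vectorizer output): both variants compute the same add-reduction with VF = 4 and init_val = 10, one adjusting in the epilog and one folding init_val into a lane of the initial vector.

#include <stdio.h>

int
main (void)
{
  int a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  int init_val = 10;

  int v1[4] = { 0, 0, 0, 0 };            /* Option1: adjust in epilog.  */
  int v2[4] = { init_val, 0, 0, 0 };      /* Option2: init_val in one lane.  */

  for (int i = 0; i < 8; i += 4)          /* "vector" loop, 4 lanes at a time */
    for (int lane = 0; lane < 4; lane++)
      {
        v1[lane] += a[i + lane];
        v2[lane] += a[i + lane];
      }

  int sum1 = init_val, sum2 = 0;          /* epilog reductions */
  for (int lane = 0; lane < 4; lane++)
    {
      sum1 += v1[lane];
      sum2 += v2[lane];
    }
  printf ("%d %d\n", sum1, sum2);         /* both print 46 */
  return 0;
}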
get_later_stmt () [inline, static]
Return the later statement between STMT1 and STMT2.
References gimple_uid().
Referenced by vect_find_last_store_in_slp_instance().
init_cost () [inline, static]
Alias targetm.vectorize.init_cost.
References targetm.
Referenced by free_ira_costs(), ira_init_costs(), ira_init_costs_once(), new_bb_vec_info(), new_loop_vec_info(), and record_operand_costs().
void init_stmt_vec_info_vec (void)
Create a hash table for stmt_vec_info.
References stmt_vec_info_vec.
Referenced by execute_vect_slp(), parallelize_loops(), and vectorize_loops().
is_loop_header_bb_p () [inline, static]
Return true if BB is a loop header.
References basic_block_def::loop_father, and basic_block_def::preds.
Referenced by new_stmt_vec_info(), vect_analyze_loop_operations(), vect_is_simple_reduction_1(), and vect_is_slp_reduction().
is_pattern_stmt_p () [inline, static]
Return TRUE if a statement represented by STMT_INFO is a part of a pattern.
References vinfo_for_stmt().
Referenced by process_use(), vect_analyze_stmt(), vect_determine_vectorization_factor(), vect_is_simple_reduction_1(), vect_mark_stmts_to_be_vectorized(), vect_remove_slp_scalar_calls(), vect_remove_stores(), vect_schedule_slp(), vect_schedule_slp_instance(), vect_transform_loop(), vectorizable_call(), and vectorizable_store().
known_alignment_for_access_p () [inline, static]
Return TRUE if the alignment of the data access is known, and FALSE otherwise.
Referenced by vect_enhance_data_refs_alignment(), vect_supportable_dr_alignment(), vect_update_misalignment_for_peel(), and vector_alignment_reachable_p().
loop_vec_info_for_loop () [inline, static]
References loop::aux.
Referenced by set_prologue_iterations(), slpeel_tree_peel_loop_to_edge(), vect_analyze_loop(), and vect_create_epilog_for_reduction().
nested_in_vect_loop_p () [inline, static]
References loop::inner.
Referenced by get_initial_def_for_induction(), get_initial_def_for_reduction(), supportable_widening_operation(), vect_analyze_data_ref_access(), vect_analyze_data_refs(), vect_compute_data_ref_alignment(), vect_create_addr_base_for_vector_ref(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_init_vector_1(), vect_model_reduction_cost(), vect_recog_dot_prod_pattern(), vect_recog_widen_sum_pattern(), vect_setup_realignment(), vect_supportable_dr_alignment(), vect_transform_stmt(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_reduction(), and vectorizable_store().
stmt_vec_info new_stmt_vec_info (gimple stmt, loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
Function new_stmt_vec_info. Create and initialize a new stmt_vec_info struct for STMT.
References is_loop_header_bb_p(), loop_vect, undef_vec_info_type, vect_internal_def, vect_unknown_def_type, and vect_unused_in_scope.
Referenced by get_initial_def_for_induction(), new_bb_vec_info(), new_loop_vec_info(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_finish_stmt_generation(), vect_is_simple_reduction_1(), vect_mark_pattern_stmts(), vect_recog_bool_pattern(), vect_recog_divmod_pattern(), vect_recog_mixed_size_cond_pattern(), vect_recog_rotate_pattern(), vectorizable_load(), and vectorizable_reduction().
unsigned record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where)
Record the cost of a statement, either by directly informing the target model or by saving it in a vector for later processing. Return a preliminary estimate of the statement's cost.
References add_stmt_cost(), add_stmt_info_to_vec(), builtin_vectorization_cost(), count, and stmt_vectype().
Referenced by vect_analyze_slp_cost_1(), vect_get_known_peeling_cost(), vect_get_load_cost(), vect_get_store_cost(), vect_model_load_cost(), vect_model_simple_cost(), and vect_model_store_cost().
set_vinfo_for_stmt () [inline, static]
Set vectorizer information INFO for STMT.
References gimple_set_uid(), and gimple_uid().
Referenced by free_stmt_vec_info(), get_initial_def_for_induction(), new_bb_vec_info(), new_loop_vec_info(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_finish_stmt_generation(), vect_is_simple_reduction_1(), vect_mark_pattern_stmts(), vect_recog_bool_pattern(), vect_recog_divmod_pattern(), vect_recog_mixed_size_cond_pattern(), vect_recog_rotate_pattern(), vect_remove_slp_scalar_calls(), vectorizable_call(), vectorizable_load(), and vectorizable_reduction().
bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge)
Function prototypes.
Simple loop peeling and versioning utilities for vectorizer's purposes - in tree-vect-loop-manip.c.
bool supportable_narrowing_operation (enum tree_code code, tree vectype_out, tree vectype_in, enum tree_code *code1, int *multi_step_cvt, vec< tree > *interm_types)
Function supportable_narrowing_operation

Check whether an operation represented by the code CODE is a narrowing operation that is supported by the target platform in vector form (i.e., when operating on arguments of type VECTYPE_IN and producing a result of type VECTYPE_OUT).

Narrowing operations we currently support are NOP (CONVERT) and FIX_TRUNC. This function checks if these operations are supported by the target platform directly via vector tree-codes.

Output:
- CODE1 is the code of a vector operation to be used when vectorizing the operation, if available.
- MULTI_STEP_CVT determines the number of required intermediate steps in case of multi-step conversion (like int->short->char - in that case MULTI_STEP_CVT will be 1).
- INTERM_TYPES contains the intermediate type required to perform the narrowing operation (short in the above example).
References insn_data, insn_data_d::operand, optab_default, optab_for_tree_code(), optab_handler(), lang_hooks_for_types::type_for_mode, lang_hooks::types, and unknown_optab.
Referenced by vectorizable_conversion().
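For reference, a source-level example of a multi-step narrowing (hypothetical, with illustrative names): the int to signed char conversion below goes through one intermediate short step, i.e. MULTI_STEP_CVT would be 1 and INTERM_TYPES would hold the short vector type.

void
narrow_int_to_char (signed char *restrict out, const int *restrict in, int n)
{
  /* NOP/CONVERT narrowing: int -> short -> signed char.  */
  for (int i = 0; i < n; i++)
    out[i] = (signed char) in[i];
}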
bool supportable_widening_operation (enum tree_code code, gimple stmt, tree vectype_out, tree vectype_in, enum tree_code *code1, enum tree_code *code2, int *multi_step_cvt, vec< tree > *interm_types)
Function supportable_widening_operation

Check whether an operation represented by the code CODE is a widening operation that is supported by the target platform in vector form (i.e., when operating on arguments of type VECTYPE_IN producing a result of type VECTYPE_OUT).

Widening operations we currently support are NOP (CONVERT), FLOAT and WIDEN_MULT. This function checks if these operations are supported by the target platform either directly (via vector tree-codes), or via target builtins.

Output:
- CODE1 and CODE2 are codes of vector operations to be used when vectorizing the operation, if available.
- MULTI_STEP_CVT determines the number of required intermediate steps in case of multi-step conversion (like char->short->int - in that case MULTI_STEP_CVT will be 1).
- INTERM_TYPES contains the intermediate type required to perform the widening operation (short in the above example).
References insn_data, nested_in_vect_loop_p(), insn_data_d::operand, optab_default, optab_for_tree_code(), optab_handler(), supportable_widening_operation(), lang_hooks_for_types::type_for_mode, lang_hooks::types, vect_used_by_reduction, and vinfo_for_stmt().
Referenced by supportable_widening_operation(), vect_recog_widen_mult_pattern(), vect_recog_widen_shift_pattern(), and vectorizable_conversion().
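A source-level example of a widening operation (hypothetical, with illustrative names): the unsigned char multiply below produces int results, the kind of statement for which WIDEN_MULT support, possibly multi-step via a short intermediate type, is queried.

void
widen_mult_u8_to_i32 (int *restrict out, const unsigned char *restrict a,
                      const unsigned char *restrict b, int n)
{
  /* Widening multiply: 8-bit operands, 32-bit results.  */
  for (int i = 0; i < n; i++)
    out[i] = a[i] * b[i];
}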
vec_info_for_bb () [inline, static]
References basic_block_def::aux.
Referenced by vect_slp_transform_bb().
bool vect_analyze_data_ref_accesses (loop_vec_info, bb_vec_info)
bool vect_analyze_data_ref_dependences (loop_vec_info, int *)
bool vect_analyze_data_refs (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo, int *min_vf)
Function vect_analyze_data_refs.

Find all the data references in the loop or basic block.

The general structure of the analysis of data refs in the vectorizer is as follows:
1- vect_analyze_data_refs(loop/bb): call compute_data_dependences_for_loop/bb to find and analyze all data-refs in the loop/bb and their dependences.
2- vect_analyze_dependences(): apply dependence testing using ddrs.
3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
References affine_iv::base, create_data_ref(), DR_BASE_ADDRESS, DR_INIT, DR_IS_READ, DR_OFFSET, DR_REF, DR_STEP, DR_STMT, dump_enabled_p(), dump_generic_expr(), dump_gimple_stmt(), dump_printf(), dump_printf_loc(), find_data_references_in_loop(), find_data_references_in_stmt(), find_loop_nest(), free_data_ref(), get_inner_reference(), get_vectype_for_scalar_type(), gimple_call_arg(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_clobber_p(), gsi_end_p(), gsi_next(), gsi_start_bb(), gsi_stmt(), highest_pow2_factor(), host_integerp(), HOST_WIDE_INT, integer_zerop(), is_gimple_call(), loop_containing_stmt(), nested_in_vect_loop_p(), offset, loop::simduid, simple_iv(), split_constant_offset(), affine_iv::step, data_reference::stmt, stmt_can_throw_internal(), targetm, tree_int_cst_equal(), unshare_expr(), vect_check_gather(), vect_location, and vinfo_for_stmt().
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
bool vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo, bb_vec_info bb_vinfo)
Function vect_analyze_data_refs_alignment Analyze the alignment of the data-references in the loop. Return FALSE if a data reference is found that cannot be vectorized.
References dump_enabled_p(), dump_printf_loc(), vect_compute_data_refs_alignment(), vect_find_same_alignment_drs(), and vect_location.
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
loop_vec_info vect_analyze_loop (struct loop *)
Drive for loop analysis stage.
loop_vec_info vect_analyze_loop_form (struct loop *)
bool vect_analyze_slp (loop_vec_info, bb_vec_info)
bool vect_can_advance_ivs_p (loop_vec_info)
bool vect_can_force_dr_alignment_p (const_tree, unsigned int)
In tree-vect-data-refs.c.
tree vect_check_gather (gimple stmt, loop_vec_info loop_vinfo, tree *basep, tree *offp, int *scalep)
Check whether a non-affine read in stmt is suitable for gather load and if so, return a builtin decl for that operation.
References do_add(), double_int_to_tree(), DR_REF, expr_invariant_in_loop_p(), extract_ops_from_tree(), get_gimple_rhs_class(), get_inner_reference(), gimple_assign_rhs1(), gimple_assign_rhs2(), gimple_assign_rhs_code(), GIMPLE_TERNARY_RHS, host_integerp(), HOST_WIDE_INT, integer_zerop(), mem_ref_offset(), targetm, tree_low_cst(), and vinfo_for_stmt().
Referenced by vect_analyze_data_refs(), vect_mark_stmts_to_be_vectorized(), and vectorizable_load().
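A source-level sketch of a gather candidate (hypothetical example): the load from data is indexed through idx, so it is not an affine function of i, which is the access pattern vect_check_gather is asked about when the target provides gather-load builtins.

void
gather_example (float *restrict out, const float *restrict data,
                const int *restrict idx, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = data[idx[i]];   /* non-affine read: a gather */
}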
tree vect_create_addr_base_for_vector_ref (gimple stmt, gimple_seq *new_stmt_list, tree offset, struct loop *loop)
Function vect_create_addr_base_for_vector_ref.

Create an expression that computes the address of the first memory location that will be accessed for a data reference.

Input:
STMT: The statement containing the data reference.
NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
OFFSET: Optional. If supplied, it is added to the initial address.
LOOP: Specify relative to which loop-nest should the address be computed. For example, when the dataref is in an inner-loop nested in an outer-loop that is now being vectorized, LOOP can be either the outer-loop, or the inner-loop. The first memory location accessed by the following dataref ('in' points to short):
  for (i=0; i<N; i++)
    for (j=0; j<M; j++)
      s += in[i+j]
is as follows:
  if LOOP=i_loop: &in (relative to i_loop)
  if LOOP=j_loop: &in+i*2B (relative to j_loop)

Output:
1. Return an SSA_NAME whose value is the address of the memory location of the first vector of the data reference.
2. If new_stmt_list is not NULL_TREE after return then the caller must insert these statement(s) which define the returned SSA_NAME.

FORNOW: We are only handling array accesses with step 1.
References build_pointer_type(), DR_BASE_ADDRESS, DR_INIT, DR_OFFSET, DR_PTR_INFO, DR_REF, dump_enabled_p(), dump_generic_expr(), dump_printf_loc(), duplicate_ssa_name_ptr_info(), force_gimple_operand(), get_name(), gimple_seq_add_seq(), mark_ptr_info_alignment_unknown(), nested_in_vect_loop_p(), unshare_expr(), vect_get_new_vect_var(), vect_location, vect_pointer_var, and vinfo_for_stmt().
Referenced by vect_create_cond_for_align_checks(), vect_create_data_ref_ptr(), vect_gen_niters_for_prolog_loop(), and vect_setup_realignment().
tree vect_create_data_ref_ptr (gimple stmt, tree aggr_type, struct loop *at_loop, tree offset, tree *initial_address, gimple_stmt_iterator *gsi, gimple *ptr_incr, bool only_init, bool *inv_p)
Function vect_create_data_ref_ptr.

Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first location accessed in the loop by STMT, along with the def-use update chain to appropriately advance the pointer through the loop iterations. Also set aliasing information for the pointer. This pointer is used by the callers to this function to create a memory reference expression for vector load/store access.

Input:
1. STMT: a stmt that references memory. Expected to be of the form GIMPLE_ASSIGN <name, data-ref> or GIMPLE_ASSIGN <data-ref, name>.
2. AGGR_TYPE: the type of the reference, which should be either a vector or an array.
3. AT_LOOP: the loop where the vector memref is to be created.
4. OFFSET (optional): an offset to be added to the initial address accessed by the data-ref in STMT.
5. BSI: location where the new stmts are to be placed if there is no loop.
6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain pointing to the initial address.

Output:
1. Declare a new ptr to vector_type, and have it point to the base of the data reference (initial address accessed by the data reference). For example, for vector of type V8HI, the following code is generated:
     v8hi *ap;
     ap = (v8hi *)initial_address;
   if OFFSET is not supplied:
     initial_address = &a[init];
   if OFFSET is supplied:
     initial_address = &a[init + OFFSET];
   Return the initial_address in INITIAL_ADDRESS.
2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also update the pointer in each iteration of the loop. Return the increment stmt that updates the pointer in PTR_INCR.
3. Set INV_P to true if the access pattern of the data reference in the vectorized loop is invariant. Set it to false otherwise.
4. Return the pointer.
References alias_sets_conflict_p(), build_pointer_type_for_mode(), create_iv(), DR_BASE_ADDRESS, DR_BASE_OBJECT, DR_PTR_INFO, DR_REF, DR_STEP, dump_enabled_p(), dump_generic_expr(), dump_printf(), dump_printf_loc(), duplicate_ssa_name_ptr_info(), get_alias_set(), get_name(), gimple_assign_set_lhs(), gimple_bb(), gsi_insert_before(), gsi_insert_on_edge_immediate(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, gsi_stmt(), integer_zerop(), loop_preheader_edge(), make_ssa_name(), nested_in_vect_loop_p(), new_stmt_vec_info(), ptr_mode, set_vinfo_for_stmt(), standard_iv_increment_position(), tree_code_name, tree_int_cst_sgn(), useless_type_conversion_p(), vect_create_addr_base_for_vector_ref(), vect_get_new_vect_var(), vect_location, vect_pointer_var, and vinfo_for_stmt().
Referenced by vect_setup_realignment(), vectorizable_load(), and vectorizable_store().
void vect_detect_hybrid_slp (loop_vec_info)
void vect_do_peeling_for_alignment (loop_vec_info loop_vinfo, unsigned int th, bool check_profitability)
Function vect_do_peeling_for_alignment Peel the first 'niters' iterations of the loop represented by LOOP_VINFO. 'niters' is set to the misalignment of one of the data references in the loop, thereby forcing it to refer to an aligned location at the beginning of the execution of this loop. The data reference for which we are peeling is recorded in LOOP_VINFO_UNALIGNED_DR.
References create_tmp_var(), dump_enabled_p(), dump_printf(), dump_printf_loc(), force_gimple_operand(), free_original_copy_tables(), double_int::from_shwi(), gsi_insert_seq_on_edge_immediate(), initialize_original_copy_tables(), loop_preheader_edge(), record_niter_bound(), scev_reset(), slpeel_tree_peel_loop_to_edge(), slpeel_verify_cfg_after_peeling(), types_compatible_p(), vect_build_loop_niters(), vect_gen_niters_for_prolog_loop(), vect_location, and vect_update_inits_of_drs().
Referenced by vect_transform_loop().
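The sketch below imitates the effect of this peeling by hand (hypothetical C, assuming 16-byte vectors and a float pointer that is at least element-aligned); the actual transformation instead derives 'niters' from the recorded misalignment of LOOP_VINFO_UNALIGNED_DR.

#include <stdint.h>

void
peeled_for_alignment (float *a, int n)
{
  /* Scalar prolog: iterate until &a[i] reaches a 16-byte boundary.  */
  int peel = (int) (((16 - ((uintptr_t) a & 15)) & 15) / sizeof *a);
  if (peel > n)
    peel = n;
  int i = 0;
  for (; i < peel; i++)
    a[i] += 1.0f;
  /* Main loop: accesses now start at an aligned address and can use
     aligned vector loads/stores.  */
  for (; i < n; i++)
    a[i] += 1.0f;
}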
void vect_do_peeling_for_loop_bound (loop_vec_info loop_vinfo, tree *ratio, unsigned int th, bool check_profitability)
Function vect_do_peeling_for_loop_bound Peel the last iterations of the loop represented by LOOP_VINFO. The peeled iterations form a new epilog loop. Given that the loop now iterates NITERS times, the new epilog loop iterates NITERS % VECTORIZATION_FACTOR times. The original loop will later be made to iterate NITERS / VECTORIZATION_FACTOR times (this value is placed into RATIO). COND_EXPR and COND_EXPR_STMT_LIST are combined with a new generated test.
References dump_enabled_p(), dump_printf(), dump_printf_loc(), free_original_copy_tables(), double_int::from_shwi(), initialize_original_copy_tables(), loop_preheader_edge(), loop::num, record_niter_bound(), scev_reset(), single_exit(), slpeel_tree_peel_loop_to_edge(), slpeel_verify_cfg_after_peeling(), edge_def::src, vect_generate_tmps_on_preheader(), vect_location, and vect_update_ivs_after_vectorizer().
Referenced by vect_transform_loop().
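A hand-written sketch of the resulting loop structure (hypothetical C, assuming a vectorization factor of 4): the main loop executes NITERS / 4 vector iterations (the RATIO above) and the peeled epilog loop handles the remaining NITERS % 4 scalar iterations.

void
peeled_for_loop_bound (float *restrict a, const float *restrict b, int n)
{
  int main_iters = n - (n % 4);
  for (int i = 0; i < main_iters; i += 4)       /* vectorized main loop */
    for (int lane = 0; lane < 4; lane++)        /* stands in for one SIMD op */
      a[i + lane] = 2.0f * b[i + lane];
  for (int i = main_iters; i < n; i++)          /* peeled epilog loop */
    a[i] = 2.0f * b[i];
}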
bool vect_enhance_data_refs_alignment (loop_vec_info)
void vect_finish_stmt_generation (gimple stmt, gimple vec_stmt, gimple_stmt_iterator *gsi)
Function vect_finish_stmt_generation. Insert a new stmt.
References copy_ssa_name(), dump_enabled_p(), dump_gimple_stmt(), dump_printf_loc(), gimple_assign_lhs(), gimple_call_flags(), gimple_has_mem_ops(), gimple_location(), gimple_set_location(), gimple_set_vdef(), gimple_set_vuse(), gimple_vdef(), gimple_vuse(), gimple_vuse_op(), gsi_end_p(), gsi_insert_before(), GSI_SAME_STMT, gsi_stmt(), is_gimple_assign(), is_gimple_call(), is_gimple_reg(), new_stmt_vec_info(), set_vinfo_for_stmt(), vect_location, and vinfo_for_stmt().
Referenced by bump_vector_ptr(), permute_vec_elements(), read_vector_array(), vect_create_mask_and_perm(), vect_create_vectorized_demotion_stmts(), vect_gen_widened_results_half(), vect_init_vector_1(), vect_permute_load_chain(), vect_permute_store_chain(), vectorizable_assignment(), vectorizable_call(), vectorizable_condition(), vectorizable_conversion(), vectorizable_load(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), vectorizable_store(), and write_vector_array().
gimple vect_force_simple_reduction (loop_vec_info loop_info, gimple phi, bool check_reduction, bool *double_reduc)
Wrapper around vect_is_simple_reduction_1, which will modify code in-place if it enables detection of more reductions. Arguments as there.
References vect_is_simple_reduction_1().
Referenced by gather_scalar_reductions(), and vect_analyze_scalar_cycles_1().
void vect_free_slp_instance (slp_instance)
In tree-vect-slp.c.
int vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue, int *peel_iters_epilogue, int scalar_single_iter_cost, stmt_vector_for_cost *prologue_cost_vec, stmt_vector_for_cost *epilogue_cost_vec)
Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.
References cond_branch_taken, dump_enabled_p(), dump_printf_loc(), record_stmt_cost(), scalar_stmt, vect_epilogue, vect_location, and vect_prologue.
Referenced by vect_estimate_min_profitable_iters(), and vect_peeling_hash_get_lowest_cost().
void vect_get_load_cost (struct data_reference *dr, int ncopies, bool add_realign_cost, unsigned int *inside_cost, unsigned int *prologue_cost, stmt_vector_for_cost *prologue_cost_vec, stmt_vector_for_cost *body_cost_vec, bool record_prologue_costs)
Calculate cost of DR's memory access.
References dr_aligned, dr_explicit_realign, dr_explicit_realign_optimized, DR_STMT, dr_unaligned_supported, dr_unaligned_unsupported, dump_enabled_p(), dump_printf_loc(), record_stmt_cost(), data_reference::stmt, targetm, unaligned_load, vec_perm, vect_body, vect_location, vect_prologue, vect_supportable_dr_alignment(), vector_load, vector_stmt, and vinfo_for_stmt().
Referenced by vect_get_data_access_cost(), and vect_model_load_cost().
int vect_get_single_scalar_iteration_cost (loop_vec_info)
void vect_get_slp_defs (vec< tree > ops, slp_tree slp_node, vec< vec< tree > > *vec_oprnds, int reduc_index)
Get vectorized definitions for SLP_NODE. If the scalar definitions are loop invariants or constants, collect them and call vect_get_constant_vectors() to create vector stmts. Otherwise, the def-stmts must be already vectorized and the vectorized stmts must be stored in the corresponding child of SLP_NODE, and we call vect_get_slp_vect_defs () to retrieve them.
References first_stmt(), gimple_get_lhs(), HOST_WIDE_INT, operand_equal_p(), vect_get_constant_vectors(), vect_get_slp_vect_defs(), vect_get_smallest_scalar_type(), vinfo_for_stmt(), and vNULL.
Referenced by vect_get_vec_defs(), vectorizable_call(), and vectorizable_condition().
tree vect_get_smallest_scalar_type (gimple stmt, HOST_WIDE_INT *lhs_size_unit, HOST_WIDE_INT *rhs_size_unit)
Return the smallest scalar part of STMT. This is used to determine the vectype of the stmt. We generally set the vectype according to the type of the result (lhs). For stmts whose result-type is different than the type of the arguments (e.g., demotion, promotion), vectype will be reset appropriately (later). Note that we have to visit the smallest datatype in this function, because that determines the VF. If the smallest datatype in the loop is present only as the rhs of a promotion operation - we'd miss it. Such a case, where a variable of this datatype does not appear in the lhs anywhere in the loop, can only occur if it's an invariant: e.g.: 'int_x = (int) short_inv', which we'd expect to have been optimized away by invariant motion. However, we cannot rely on invariant motion to always take invariants out of the loop, and so in the case of promotion we also have to check the rhs. LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding types.
References gimple_assign_cast_p(), gimple_assign_rhs1(), gimple_assign_rhs_code(), gimple_expr_type(), HOST_WIDE_INT, and is_gimple_assign().
Referenced by vect_build_slp_tree_1(), vect_determine_vectorization_factor(), and vect_get_slp_defs().
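A small example of the promotion case described above (hypothetical): the smallest scalar type, short, appears only on the rhs of the widening add, and it, rather than the int result type, determines the vectorization factor.

void
promoted_rhs (int *restrict out, const short *restrict in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = in[i] + 1;   /* short operand promoted to int result */
}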
vect_get_stmt_cost () [inline, static]
Get cost by calling cost target builtin.
References builtin_vectorization_cost().
Referenced by vect_bb_slp_scalar_cost(), vect_estimate_min_profitable_iters(), and vect_get_single_scalar_iteration_cost().
void vect_get_store_cost (struct data_reference *dr, int ncopies, unsigned int *inside_cost, stmt_vector_for_cost *body_cost_vec)
Calculate cost of DR's memory access.
References dr_aligned, DR_STMT, dr_unaligned_supported, dr_unaligned_unsupported, dump_enabled_p(), dump_printf_loc(), record_stmt_cost(), data_reference::stmt, unaligned_store, vect_body, vect_location, vect_supportable_dr_alignment(), vector_store, and vinfo_for_stmt().
Referenced by vect_get_data_access_cost(), and vect_model_store_cost().
void vect_get_vec_defs (tree op0, tree op1, gimple stmt, vec< tree > *vec_oprnds0, vec< tree > *vec_oprnds1, slp_tree slp_node, int reduc_index)
Get vectorized definitions for OP0 and OP1. REDUC_INDEX is the index of reduction operand in case of reduction, and -1 otherwise.
References vect_get_slp_defs(), and vect_get_vec_def_for_operand().
Referenced by vect_create_epilog_for_reduction(), vectorizable_assignment(), vectorizable_conversion(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), and vectorizable_store().
bool vect_grouped_load_supported (tree, unsigned HOST_WIDE_INT)
bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT)
tree vect_init_vector (gimple, tree, tree, gimple_stmt_iterator *)
bool vect_is_simple_use (tree operand, gimple stmt, loop_vec_info loop_vinfo, bb_vec_info bb_vinfo, gimple *def_stmt, tree *def, enum vect_def_type *dt)
Function vect_is_simple_use. Input: LOOP_VINFO - the vect info of the loop that is being vectorized. BB_VINFO - the vect info of the basic block that is being vectorized. OPERAND - operand of STMT in the loop or bb. DEF - the defining stmt in case OPERAND is an SSA_NAME. Returns whether a stmt with OPERAND can be vectorized. For loops, supportable operands are constants, loop invariants, and operands that are defined by the current iteration of the loop. Unsupportable operands are those that are defined by a previous iteration of the loop (as is the case in reduction/induction computations). For basic blocks, supportable operands are constants and bb invariants. For now, operands defined outside the basic block are not supported.
References dump_enabled_p(), dump_generic_expr(), dump_gimple_stmt(), dump_printf_loc(), flow_bb_inside_loop_p(), gimple_assign_lhs(), gimple_bb(), gimple_call_lhs(), gimple_nop_p(), gimple_phi_result(), is_gimple_min_invariant(), vect_constant_def, vect_double_reduction_def, vect_external_def, vect_location, vect_unknown_def_type, and vinfo_for_stmt().
Referenced by check_bool_pattern(), process_use(), type_conversion_p(), vect_analyze_slp_cost_1(), vect_get_and_check_slp_defs(), vect_get_vec_def_for_operand(), vect_is_simple_use_1(), vect_recog_rotate_pattern(), vect_recog_vector_vector_shift_pattern(), vectorizable_condition(), vectorizable_conversion(), vectorizable_live_operation(), vectorizable_operation(), vectorizable_reduction(), and vectorizable_store().
bool vect_is_simple_use_1 (tree operand, gimple stmt, loop_vec_info loop_vinfo, bb_vec_info bb_vinfo, gimple *def_stmt, tree *def, enum vect_def_type *dt, tree *vectype)
Function vect_is_simple_use_1. Same as vect_is_simple_use but also determines the vector operand type of OPERAND and stores it to *VECTYPE. If the definition of OPERAND is vect_uninitialized_def, vect_constant_def or vect_external_def, *VECTYPE will be set to NULL_TREE and the caller is responsible for computing the best suited vector type for the scalar operand.
References vect_constant_def, vect_double_reduction_def, vect_external_def, vect_induction_def, vect_internal_def, vect_is_simple_use(), vect_nested_cycle, vect_reduction_def, vect_uninitialized_def, and vinfo_for_stmt().
Referenced by vect_is_simple_cond(), vectorizable_assignment(), vectorizable_call(), vectorizable_conversion(), vectorizable_load(), vectorizable_operation(), vectorizable_reduction(), and vectorizable_shift().
bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT)
void vect_loop_versioning (loop_vec_info loop_vinfo, unsigned int th, bool check_profitability)
Function vect_loop_versioning.

If the loop has data references that may or may not be aligned, or/and has data reference relations whose independence was not proven, then two versions of the loop need to be generated, one which is vectorized and one which isn't. A test is then generated to control which of the loops is executed. The test checks for the alignment of all of the data references that may or may not be aligned. An additional sequence of runtime tests is generated for each pair of DDRs whose independence was not proven. The vectorized version of the loop is executed only if both alias and alignment tests are passed.

The test generated to check which version of the loop is executed is modified to also check for profitability as indicated by the cost model initially.

The versioning precondition(s) are placed in *COND_EXPR and *COND_EXPR_STMT_LIST.
References add_phi_arg(), adjust_phi_and_debug_stmts(), build_int_cst(), copy_ssa_name(), create_phi_node(), edge_def::dest, force_gimple_operand_1(), free_original_copy_tables(), gimple_phi_arg_location_from_edge(), gimple_seq_add_seq(), gsi_end_p(), gsi_insert_seq_before(), gsi_last_bb(), gsi_next(), GSI_SAME_STMT, gsi_start_phis(), gsi_stmt(), initialize_original_copy_tables(), is_gimple_condexpr(), loop_version(), basic_block_def::preds, prob, single_exit(), split_edge(), update_ssa(), vect_create_cond_for_alias_checks(), and vect_create_cond_for_align_checks().
Referenced by vect_transform_loop().
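A source-level case where such versioning typically triggers (hypothetical example): without restrict qualifiers the independence of a and b is unknown at compile time, so the vectorized version is guarded by a runtime alias check, with the scalar loop kept as the fallback version.

void
maybe_aliasing (float *a, const float *b, int n)
{
  for (int i = 0; i < n; i++)
    a[i] = b[i] + 1.0f;
}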
bool vect_make_slp_decision (loop_vec_info)
bool vect_mark_stmts_to_be_vectorized (loop_vec_info)
int vect_min_worthwhile_factor (enum tree_code)
void vect_model_load_cost (stmt_vec_info stmt_info, int ncopies, bool load_lanes_p, slp_tree slp_node, stmt_vector_for_cost *prologue_cost_vec, stmt_vector_for_cost *body_cost_vec)
Function vect_model_load_cost Models cost for loads. In the case of grouped accesses, the last access has the overhead of the grouped access attributed to it. Since unaligned accesses are supported for loads, we also account for the costs of the access scheme chosen.
References dump_enabled_p(), dump_printf_loc(), exact_log2(), first_stmt(), record_stmt_cost(), scalar_load, vec_construct, vec_perm, vect_body, vect_cost_group_size(), vect_get_load_cost(), vect_location, and vinfo_for_stmt().
Referenced by vect_analyze_slp_cost_1(), and vectorizable_load().
void vect_model_simple_cost (stmt_vec_info stmt_info, int ncopies, enum vect_def_type *dt, stmt_vector_for_cost *prologue_cost_vec, stmt_vector_for_cost *body_cost_vec)
Function vect_model_simple_cost. Models cost for simple operations, i.e. those that only emit ncopies of a single op. Right now, this does not account for multiple insns that could be generated for the single vector op. We will handle that shortly.
References dump_enabled_p(), dump_printf_loc(), record_stmt_cost(), vect_body, vect_constant_def, vect_external_def, vect_location, vect_prologue, and vector_stmt.
Referenced by vectorizable_assignment(), vectorizable_call(), vectorizable_conversion(), vectorizable_operation(), and vectorizable_shift().
void vect_model_store_cost (stmt_vec_info stmt_info, int ncopies, bool store_lanes_p, enum vect_def_type dt, slp_tree slp_node, stmt_vector_for_cost *prologue_cost_vec, stmt_vector_for_cost *body_cost_vec)
Function vect_model_store_cost Models cost for stores. In the case of grouped accesses, one access has the overhead of the grouped access attributed to it.
References dump_enabled_p(), dump_printf_loc(), exact_log2(), first_stmt(), record_stmt_cost(), scalar_to_vec, vec_perm, vect_body, vect_constant_def, vect_cost_group_size(), vect_external_def, vect_get_store_cost(), vect_location, vect_prologue, and vinfo_for_stmt().
Referenced by vect_analyze_slp_cost_1(), and vectorizable_store().
void vect_pattern_recog (loop_vec_info, bb_vec_info)
void vect_permute_store_chain (vec< tree > dr_chain, unsigned int length, gimple stmt, gimple_stmt_iterator *gsi, vec< tree > *result_chain)
Function vect_permute_store_chain.

Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be a power of 2, generate interleave_high/low stmts to reorder the data correctly for the stores. Return the final references for stores in RESULT_CHAIN.

E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8. The input is 4 vectors each containing 8 elements. We assign a number to each element, the input sequence is:

  1st vec:   0  1  2  3  4  5  6  7
  2nd vec:   8  9 10 11 12 13 14 15
  3rd vec:  16 17 18 19 20 21 22 23
  4th vec:  24 25 26 27 28 29 30 31

The output sequence should be:

  1st vec:   0  8 16 24  1  9 17 25
  2nd vec:   2 10 18 26  3 11 19 27
  3rd vec:   4 12 20 28  5 13 21 29
  4th vec:   6 14 22 30  7 15 23 31

i.e., we interleave the contents of the four vectors in their order.

We use interleave_high/low instructions to create such output. The input of each interleave_high/low operation is two vectors:
  1st vec    2nd vec
  0 1 2 3    4 5 6 7
The even elements of the result vector are obtained left-to-right from the high/low elements of the first vector. The odd elements of the result are obtained left-to-right from the high/low elements of the second vector. The output of interleave_high will be 0 4 1 5, and of interleave_low: 2 6 3 7.

The permutation is done in log LENGTH stages. In each stage interleave_high and interleave_low stmts are created for each pair of vectors in DR_CHAIN, where the first argument is taken from the first half of DR_CHAIN and the second argument from its second half. In our example,

  I1: interleave_high (1st vec, 3rd vec)
  I2: interleave_low  (1st vec, 3rd vec)
  I3: interleave_high (2nd vec, 4th vec)
  I4: interleave_low  (2nd vec, 4th vec)

The output for the first stage is:

  I1:   0 16  1 17  2 18  3 19
  I2:   4 20  5 21  6 22  7 23
  I3:   8 24  9 25 10 26 11 27
  I4:  12 28 13 29 14 30 15 31

The output of the second stage, i.e. the final result, is:

  I1:   0  8 16 24  1  9 17 25
  I2:   2 10 18 26  3 11 19 27
  I3:   4 12 20 28  5 13 21 29
  I4:   6 14 22 30  7 15 23 31
References exact_log2(), gimple_build_assign_with_ops(), make_temp_ssa_name(), memcpy(), vect_finish_stmt_generation(), vect_gen_perm_mask(), and vinfo_for_stmt().
Referenced by vectorizable_store().
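A source-level example of a store group that needs this permutation (hypothetical, group size 4 as in the LENGTH == 4 example above): each iteration writes four adjacent shorts, so the four per-field vectors must be interleaved before being stored.

void
interleaved_store4 (short *restrict out, const short *restrict a,
                    const short *restrict b, const short *restrict c,
                    const short *restrict d, int n)
{
  for (int i = 0; i < n; i++)
    {
      out[4 * i]     = a[i];
      out[4 * i + 1] = b[i];
      out[4 * i + 2] = c[i];
      out[4 * i + 3] = d[i];
    }
}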
vect_pow2 () [inline, static]
Return pow2 (X).
Referenced by vect_model_promotion_demotion_cost(), and vectorizable_conversion().
bool vect_prune_runtime_alias_test_list (loop_vec_info)
void vect_remove_stores (gimple)
bool vect_schedule_slp (loop_vec_info, bb_vec_info)
tree vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi, tree *realignment_token, enum dr_alignment_support alignment_support_scheme, tree init_addr, struct loop **at_loop)
Function vect_setup_realignment

This function is called when vectorizing an unaligned load using the dr_explicit_realign[_optimized] scheme. This function generates the following code at the loop prolog:

      p = initial_addr;
  x   msq_init = *(floor(p));   # prolog load
      realignment_token = call target_builtin;
    loop:
  x   msq = phi (msq_init, ---)

The stmts marked with x are generated only for the case of dr_explicit_realign_optimized.

The code above sets up a new (vector) pointer, pointing to the first location accessed by STMT, and a "floor-aligned" load using that pointer. It also generates code to compute the "realignment-token" (if the relevant target hook was defined), and creates a phi-node at the loop-header bb whose arguments are the result of the prolog-load (created by this function) and the result of a load that takes place in the loop (to be created by the caller to this function).

For the case of dr_explicit_realign_optimized: The caller to this function uses the phi-result (msq) to create the realignment code inside the loop, and sets up the missing phi argument, as follows:
    loop:
      msq = phi (msq_init, lsq)
      lsq = *(floor(p'));        # load in loop
      result = realign_load (msq, lsq, realignment_token);

For the case of dr_explicit_realign:
    loop:
      msq = *(floor(p));         # load in loop
      p' = p + (VS-1);
      lsq = *(floor(p'));        # load in loop
      result = realign_load (msq, lsq, realignment_token);

Input:
STMT - (scalar) load stmt to be vectorized. This load accesses a memory location that may be unaligned.
BSI - place where new code is to be inserted.
ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes is used.

Output:
REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load target hook, if defined.
Return value - the result of the loop-header phi node.
References add_phi_arg(), build_int_cst(), copy_ssa_name(), create_phi_node(), dr_explicit_realign, dr_explicit_realign_optimized, DR_REF, gimple_assign_lhs(), gimple_assign_set_lhs(), gimple_bb(), gimple_build_assign_with_ops(), gimple_build_call(), gimple_call_lhs(), gimple_call_return_type(), gimple_call_set_lhs(), gsi_insert_before(), gsi_insert_on_edge_immediate(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, loop::header, HOST_WIDE_INT, loop::inner, loop_preheader_edge(), make_ssa_name(), nested_in_vect_loop_p(), reference_alias_ptr_type(), targetm, tree_int_cst_compare(), vect_create_addr_base_for_vector_ref(), vect_create_data_ref_ptr(), vect_create_destination_var(), and vinfo_for_stmt().
Referenced by vectorizable_load().
bb_vec_info vect_slp_analyze_bb (basic_block)
bool vect_slp_analyze_data_ref_dependences (bb_vec_info)
void vect_slp_transform_bb (basic_block)
bool vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT)
enum dr_alignment_support vect_supportable_dr_alignment (struct data_reference *dr, bool check_aligned_accesses)
Return whether the data reference DR is supported with respect to its alignment. If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even it is aligned, i.e., check if it is possible to vectorize it with different alignment.
References aligned_access_p(), dr_aligned, dr_explicit_realign, dr_explicit_realign_optimized, DR_IS_READ, DR_REF, DR_STEP, DR_STMT, dr_unaligned_supported, dr_unaligned_unsupported, known_alignment_for_access_p(), nested_in_vect_loop_p(), not_size_aligned(), optab_handler(), targetm, and vinfo_for_stmt().
Referenced by vect_build_slp_tree_1(), vect_enhance_data_refs_alignment(), vect_get_load_cost(), vect_get_store_cost(), vect_peeling_hash_insert(), vect_supported_load_permutation_p(), vect_verify_datarefs_alignment(), vect_vfa_segment_size(), vectorizable_load(), and vectorizable_store().
bool vect_supportable_shift (enum tree_code, tree)
void vect_transform_grouped_load (gimple stmt, vec< tree > dr_chain, int size, gimple_stmt_iterator *gsi)
Function vect_transform_grouped_load. Given a chain of input interleaved data-refs (in DR_CHAIN), build statements to perform their permutation and ascribe the result vectorized statements to the scalar statements.
References vect_permute_load_chain(), vect_record_grouped_load_vectors(), and vNULL.
Referenced by vectorizable_load().
void vect_transform_loop (loop_vec_info)
Drive for loop transformation stage.
bool vect_transform_slp_perm_load (slp_tree node, vec< tree > dr_chain, gimple_stmt_iterator *gsi, int vf, slp_instance slp_node_instance, bool analyze_only)
Generate vector permute statements from a list of loads in DR_CHAIN. If ANALYZE_ONLY is TRUE, only check that it is possible to create valid permute statements for the SLP node NODE of the SLP instance SLP_NODE_INSTANCE.
References build_int_cst(), can_vec_perm_p(), dump_enabled_p(), dump_gimple_stmt(), dump_printf(), dump_printf_loc(), get_vectype_for_scalar_type(), int_mode_for_mode(), lang_hooks_for_types::type_for_mode, lang_hooks::types, vect_create_mask_and_perm(), vect_get_mask_element(), vect_location, and vinfo_for_stmt().
Referenced by vect_supported_load_permutation_p(), and vectorizable_load().
bool vect_transform_stmt (gimple stmt, gimple_stmt_iterator *gsi, bool *grouped_store, slp_tree slp_node, slp_instance slp_node_instance)
Function vect_transform_stmt. Create a vectorized stmt to replace STMT, and insert it at BSI.
References assignment_vec_info_type, call_vec_info_type, condition_vec_info_type, dump_enabled_p(), dump_printf_loc(), flow_bb_inside_loop_p(), gimple_assign_lhs(), gsi_stmt(), induc_vec_info_type, loop::inner, load_vec_info_type, nested_in_vect_loop_p(), op_vec_info_type, reduc_vec_info_type, shift_vec_info_type, store_vec_info_type, type_conversion_vec_info_type, type_demotion_vec_info_type, type_promotion_vec_info_type, vect_location, vect_used_in_outer, vect_used_in_outer_by_reduction, vectorizable_assignment(), vectorizable_call(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), vectorizable_store(), and vinfo_for_stmt().
Referenced by vect_schedule_slp_instance(), and vect_transform_loop().
void vect_update_slp_costs_according_to_vf (loop_vec_info)
bool vect_verify_datarefs_alignment (loop_vec_info, bb_vec_info)
bool vectorizable_condition (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, tree reduc_def, int reduc_index, slp_tree slp_node)
vectorizable_condition. Check if STMT is a conditional modify expression that can be vectorized. If VEC_STMT is also passed, vectorize the STMT: create a vectorized stmt using VEC_COND_EXPR to replace it, put it in VEC_STMT, and insert it at GSI. When STMT is vectorized as a nested cycle, REDUC_DEF is the vector variable to be used at REDUC_INDEX (in the then clause if REDUC_INDEX is 1, and in the else clause if it is 2). Return FALSE if not a vectorizable STMT, TRUE otherwise.
References build_nonstandard_integer_type(), condition_vec_info_type, dump_enabled_p(), dump_printf_loc(), expand_vec_cond_expr_p(), get_same_sized_vectype(), gimple_assign_lhs(), gimple_assign_rhs1(), gimple_assign_rhs2(), gimple_assign_rhs3(), gimple_assign_rhs_code(), gimple_assign_set_lhs(), is_gimple_assign(), make_ssa_name(), vect_create_destination_var(), vect_finish_stmt_generation(), vect_get_slp_defs(), vect_get_vec_def_for_operand(), vect_get_vec_def_for_stmt_copy(), vect_internal_def, vect_is_simple_cond(), vect_is_simple_use(), vect_location, vect_nested_cycle, vinfo_for_stmt(), and vNULL.
Referenced by vect_analyze_stmt(), vect_transform_stmt(), and vectorizable_reduction().
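A source-level example of a statement this function handles (hypothetical): the conditional assignment below becomes a VEC_COND_EXPR when vectorized.

void
clamp_to_zero (int *restrict out, const int *restrict in, int n)
{
  for (int i = 0; i < n; i++)
    out[i] = in[i] > 0 ? in[i] : 0;
}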
bool vectorizable_induction (gimple phi, gimple_stmt_iterator *gsi, gimple *vec_stmt)
Function vectorizable_induction Check if PHI performs an induction computation that can be vectorized. If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized phi to replace it, put it in VEC_STMT, and add it to the same basic block. Return FALSE if not a vectorizable STMT, TRUE otherwise.
References dump_enabled_p(), dump_printf_loc(), flow_bb_inside_loop_p(), get_initial_def_for_induction(), induc_vec_info_type, loop::inner, loop_latch_edge(), nested_in_vect_loop_p(), vect_induction_def, vect_location, vect_model_induction_cost(), and vinfo_for_stmt().
Referenced by vect_analyze_loop_operations(), and vect_transform_stmt().
bool vectorizable_live_operation (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt)
Function vectorizable_live_operation. STMT computes a value that is used outside the loop. Check if it can be supported.
References binary_op, build_int_cst(), edge_def::dest, edge_def::dest_idx, dump_enabled_p(), dump_printf_loc(), get_gimple_rhs_class(), gimple_assign_lhs(), gimple_assign_rhs_code(), GIMPLE_BINARY_RHS, gimple_call_arg(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_call_lhs(), gimple_op(), GIMPLE_SINGLE_RHS, GIMPLE_UNARY_RHS, is_gimple_assign(), nested_in_vect_loop_p(), loop::simduid, single_exit(), unary_op, vect_constant_def, vect_external_def, vect_is_simple_use(), vect_location, vect_reduction_def, _loop_vec_info::vectorization_factor, and vinfo_for_stmt().
Referenced by vect_analyze_stmt(), and vect_transform_stmt().
bool vectorizable_reduction (gimple stmt, gimple_stmt_iterator *gsi, gimple *vec_stmt, slp_tree slp_node)
Function vectorizable_reduction. Check if STMT performs a reduction operation that can be vectorized. If VEC_STMT is also passed, vectorize the STMT: create a vectorized stmt to replace it, put it in VEC_STMT, and insert it at GSI. Return FALSE if not a vectorizable STMT, TRUE otherwise. This function also handles reduction idioms (patterns) that have been recognized in advance during vect_pattern_recog. In this case, STMT may be of this form: X = pattern_expr (arg0, arg1, ..., X) and it's STMT_VINFO_RELATED_STMT points to the last stmt in the original sequence that had been detected and replaced by the pattern-stmt (STMT). In some cases of reduction patterns, the type of the reduction variable X is different than the type of the other arguments of STMT. In such cases, the vectype that is used when transforming STMT into a vector stmt is different than the vectype that is used to determine the vectorization factor, because it consists of a different number of elements than the actual number of elements that are being operated upon in parallel. For example, consider an accumulation of shorts into an int accumulator. On some targets it's possible to vectorize this pattern operating on 8 shorts at a time (hence, the vectype for purposes of determining the vectorization factor should be V8HI); on the other hand, the vectype that is used to create the vector form is actually V4SI (the type of the result). Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that indicates what is the actual level of parallelism (V8HI in the example), so that the right vectorization factor would be derived. This vectype corresponds to the type of arguments to the reduction stmt, and should *NOT* be used to create the vectorized stmt. The right vectype for the vectorized stmt is obtained from the type of the result X: get_vectype_for_scalar_type (TREE_TYPE (X)) This means that, contrary to "regular" reductions (or "regular" stmts in general), the following equation: STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X)) does *NOT* necessarily hold for reduction patterns.
References binary_op, create_phi_node(), dump_enabled_p(), dump_printf(), dump_printf_loc(), flow_bb_inside_loop_p(), get_gimple_rhs_class(), gimple_assign_lhs(), gimple_assign_rhs1(), gimple_assign_rhs2(), gimple_assign_rhs3(), gimple_assign_rhs_code(), gimple_assign_set_lhs(), gimple_bb(), GIMPLE_BINARY_RHS, GIMPLE_SINGLE_RHS, GIMPLE_TERNARY_RHS, GIMPLE_UNARY_RHS, loop::header, loop::inner, is_gimple_assign(), basic_block_def::loop_father, loop_preheader_edge(), make_ssa_name(), nested_in_vect_loop_p(), new_stmt_vec_info(), optab_default, optab_for_tree_code(), optab_handler(), phis, reduc_vec_info_type, reduction_code_for_scalar_code(), set_vinfo_for_stmt(), ternary_op, types_compatible_p(), vect_constant_def, vect_create_destination_var(), vect_create_epilog_for_reduction(), vect_double_reduction_def, vect_external_def, vect_finish_stmt_generation(), vect_get_vec_def_for_operand(), vect_get_vec_def_for_stmt_copy(), vect_get_vec_defs(), vect_induction_def, vect_internal_def, vect_is_simple_reduction(), vect_is_simple_use(), vect_is_simple_use_1(), vect_location, vect_min_worthwhile_factor(), vect_model_reduction_cost(), vect_nested_cycle, vect_reduction_def, vect_unused_in_scope, vect_used_in_outer, vectorizable_condition(), vinfo_for_stmt(), and vNULL.
Referenced by vect_analyze_stmt(), and vect_transform_stmt().
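A source-level example of the mixed-type reduction discussed above (hypothetical): shorts accumulated into an int, where the short vector type determines the vectorization factor while the vectorized statements themselves produce int vectors.

int
widen_sum (const short *in, int n)
{
  int s = 0;
  for (int i = 0; i < n; i++)
    s += in[i];
  return s;
}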
unsigned vectorize_loops (void)
In tree-vectorizer.c.
Function vectorize_loops. Entry point to loop vectorization phase.
References adjust_simduid_builtins(), loop::aux, hash_table< Descriptor, Allocator >::begin(), build_array_type_nelts(), cfun, hash_table< Descriptor, Allocator >::create(), decl::decl, destroy_loop_vec_info(), hash_table< Descriptor, Allocator >::dispose(), dump_enabled_p(), dump_printf(), dump_printf_loc(), hash_table< Descriptor, Allocator >::end(), hash_table< Descriptor, Allocator >::find(), find_loop_location(), hash_table< Descriptor, Allocator >::find_slot(), loop::force_vect, free_stmt_vec_info_vec(), get_loop(), function::has_simduid_loops, init_stmt_vec_info_vec(), hash_table< Descriptor, Allocator >::is_created(), note_simd_array_uses(), number_of_loops(), optimize_loop_nest_for_speed_p(), relayout_decl(), rewrite_into_loop_closed_ssa(), simduid_to_vf::simduid, loop::simduid, statistics_counter_event(), vect_analyze_loop(), vect_location, vect_transform_loop(), _loop_vec_info::vectorization_factor, and simduid_to_vf::vf.
Referenced by tree_vectorize().
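Based on the functions listed under References above, here is a rough, simplified sketch of what this driver does. It is not the actual GCC implementation: simduid/SIMD-array handling, dump machinery, loop-closed-SSA rewriting, and statistics are omitted, and the name vectorize_loops_sketch is purely illustrative.

/* Simplified sketch of the loop-vectorization entry point: analyze each
   loop nest worth optimizing for speed and transform the accepted ones.  */
unsigned
vectorize_loops_sketch (void)
{
  unsigned num_vectorized_loops = 0;
  struct loop *loop;

  /* Nothing to do if the function has no real loops.  */
  if (number_of_loops (cfun) <= 1)
    return 0;

  init_stmt_vec_info_vec ();

  FOR_EACH_LOOP (loop, 0)
    if (optimize_loop_nest_for_speed_p (loop) || loop->force_vect)
      {
        loop_vec_info loop_vinfo = vect_analyze_loop (loop);
        loop->aux = loop_vinfo;
        if (loop_vinfo == NULL)
          continue;
        vect_transform_loop (loop_vinfo);
        num_vectorized_loops++;
      }

  /* Release per-loop and per-stmt bookkeeping set up by the analysis.  */
  FOR_EACH_LOOP (loop, 0)
    if (loop->aux)
      {
        destroy_loop_vec_info ((loop_vec_info) loop->aux, true);
        loop->aux = NULL;
      }
  free_stmt_vec_info_vec ();

  return num_vectorized_loops;
}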
stmt_vec_info vinfo_for_stmt | ( | gimple | stmt | ) |
|
inlinestatic |
Return a stmt_vec_info corresponding to STMT.
References gimple_uid().
Referenced by adjust_bool_pattern(), adjust_bool_pattern_cast(), bump_vector_ptr(), destroy_bb_vec_info(), exist_non_indexing_operands_for_use_p(), free_stmt_vec_info(), get_initial_def_for_induction(), get_initial_def_for_reduction(), is_pattern_stmt_p(), new_loop_vec_info(), process_use(), supportable_widening_operation(), type_conversion_p(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_group_access(), vect_analyze_loop_form(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp_cost(), vect_analyze_slp_cost_1(), vect_analyze_slp_instance(), vect_analyze_stmt(), vect_bb_slp_scalar_cost(), vect_bb_vectorization_profitable_p(), vect_build_slp_tree(), vect_build_slp_tree_1(), vect_can_advance_ivs_p(), vect_check_gather(), vect_compute_data_ref_alignment(), vect_compute_data_refs_alignment(), vect_create_addr_base_for_vector_ref(), vect_create_cond_for_alias_checks(), vect_create_cond_for_align_checks(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_create_mask_and_perm(), vect_create_vectorized_demotion_stmts(), vect_detect_hybrid_slp_stmts(), vect_determine_vectorization_factor(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_find_same_alignment_drs(), vect_finish_stmt_generation(), vect_gen_niters_for_prolog_loop(), vect_get_and_check_slp_defs(), vect_get_constant_vectors(), vect_get_data_access_cost(), vect_get_load_cost(), vect_get_place_in_interleaving_chain(), vect_get_single_scalar_iteration_cost(), vect_get_slp_defs(), vect_get_store_cost(), vect_get_vec_def_for_operand(), vect_get_vec_def_for_stmt_copy(), vect_handle_widen_op_by_const(), vect_init_vector_1(), vect_is_simple_reduction_1(), vect_is_simple_use(), vect_is_simple_use_1(), vect_is_slp_reduction(), vect_mark_pattern_stmts(), vect_mark_relevant(), vect_mark_slp_stmts(), vect_mark_slp_stmts_relevant(), vect_mark_stmts_to_be_vectorized(), vect_model_load_cost(), vect_model_store_cost(), vect_operation_fits_smaller_type(), vect_pattern_recog(), vect_pattern_recog_1(), vect_peeling_hash_get_lowest_cost(), vect_permute_load_chain(), vect_permute_store_chain(), vect_recog_bool_pattern(), vect_recog_divmod_pattern(), vect_recog_dot_prod_pattern(), vect_recog_mixed_size_cond_pattern(), vect_recog_over_widening_pattern(), vect_recog_rotate_pattern(), vect_recog_vector_vector_shift_pattern(), vect_recog_widen_shift_pattern(), vect_recog_widen_sum_pattern(), vect_record_grouped_load_vectors(), vect_remove_slp_scalar_calls(), vect_remove_stores(), vect_same_loop_or_bb_p(), vect_same_range_drs(), vect_schedule_slp(), vect_schedule_slp_instance(), vect_setup_realignment(), vect_slp_analyze_data_ref_dependence(), vect_slp_analyze_node_operations(), vect_slp_transform_bb(), vect_stmt_relevant_p(), vect_supportable_dr_alignment(), vect_supported_load_permutation_p(), vect_transform_loop(), vect_transform_slp_perm_load(), vect_transform_stmt(), vect_update_ivs_after_vectorizer(), vect_update_misalignment_for_peel(), vect_update_slp_costs_according_to_vf(), vect_verify_datarefs_alignment(), vect_vfa_segment_size(), vector_alignment_reachable_p(), vectorizable_assignment(), vectorizable_call(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), and vectorizable_store().
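Based on the description and the reference to gimple_uid() above, here is a minimal sketch of how this stmt-to-stmt_vec_info lookup can work, assuming the statement's uid serves as a 1-based index into stmt_vec_info_vec (the global map documented below) and a uid of 0 means no vectorizer info has been attached. This is an illustrative reconstruction, not necessarily the exact implementation.

/* Sketch: map a GIMPLE stmt to its stmt_vec_info via its uid.  */
static inline stmt_vec_info
vinfo_for_stmt_sketch (gimple stmt)
{
  unsigned int uid = gimple_uid (stmt);

  /* A uid of 0 means the vectorizer has not attached any info yet.  */
  if (uid == 0)
    return NULL;

  /* Otherwise the uid is a 1-based index into stmt_vec_info_vec.  */
  return (stmt_vec_info) stmt_vec_info_vec[uid - 1];
}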
unsigned int current_vector_size |
In tree-vect-stmts.c.
Referenced by get_vectype_for_scalar_type(), vect_analyze_loop(), and vect_slp_analyze_bb().
vec<vec_void_p> stmt_vec_info_vec |
Vector mapping GIMPLE stmt to stmt_vec_info.
Referenced by free_stmt_vec_info_vec(), and init_stmt_vec_info_vec().
LOC vect_location |
Source location
Vectorizer
Copyright (C) 2003-2013 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see http://www.gnu.org/licenses/.
Loop and basic block vectorizer.

This file contains drivers for the three vectorizers:
(1) loop vectorizer (inter-iteration parallelism),
(2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop vectorizer),
(3) BB vectorizer (out-of-loops), aka SLP.

The rest of the vectorizer's code is organized as follows:
- tree-vect-loop.c - loop specific parts such as reductions, etc. These are used by drivers (1) and (2).
- tree-vect-loop-manip.c - vectorizer's loop control-flow utilities, used by drivers (1) and (2).
- tree-vect-slp.c - BB vectorization specific analysis and transformation, used by drivers (2) and (3).
- tree-vect-stmts.c - statements analysis and transformation (used by all).
- tree-vect-data-refs.c - vectorizer specific data-refs analysis and manipulations (used by all).
- tree-vect-patterns.c - vectorizable code patterns detector (used by all).

Here's a poor attempt at illustrating that:

    tree-vectorizer.c:
    loop_vect()  loop_aware_slp()  slp_vect()
         |        /                    /
         |       /                    /
         tree-vect-loop.c    tree-vect-slp.c
               | \      \  /      / |
               |  \      \/      /  |
               |   \     /\     /   |
               |    \   /  \   /    |
        tree-vect-stmts.c  tree-vect-data-refs.c
                   \          /
                tree-vect-patterns.c
Loop or bb location.
Referenced by execute_vect_slp(), get_initial_def_for_induction(), increase_alignment(), process_use(), report_vect_op(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_ref_dependences(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_group_access(), vect_analyze_loop(), vect_analyze_loop_1(), vect_analyze_loop_2(), vect_analyze_loop_form(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_analyze_stmt(), vect_bb_vectorization_profitable_p(), vect_build_slp_tree_1(), vect_can_advance_ivs_p(), vect_compute_data_ref_alignment(), vect_create_addr_base_for_vector_ref(), vect_create_cond_for_alias_checks(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_vectorization_factor(), vect_do_peeling_for_alignment(), vect_do_peeling_for_loop_bound(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_find_same_alignment_drs(), vect_finish_stmt_generation(), vect_gen_niters_for_prolog_loop(), vect_get_and_check_slp_defs(), vect_get_data_access_cost(), vect_get_known_peeling_cost(), vect_get_load_cost(), vect_get_loop_niters(), vect_get_mask_element(), vect_get_store_cost(), vect_get_vec_def_for_operand(), vect_grouped_load_supported(), vect_grouped_store_supported(), vect_init_vector_1(), vect_is_simple_iv_evolution(), vect_is_simple_reduction_1(), vect_is_simple_use(), vect_is_slp_reduction(), vect_lanes_optab_supported_p(), vect_loop_kill_debug_uses(), vect_make_slp_decision(), vect_mark_for_runtime_alias_test(), vect_mark_relevant(), vect_mark_stmts_to_be_vectorized(), vect_model_induction_cost(), vect_model_load_cost(), vect_model_promotion_demotion_cost(), vect_model_reduction_cost(), vect_model_simple_cost(), vect_model_store_cost(), vect_pattern_recog(), vect_pattern_recog_1(), vect_prune_runtime_alias_test_list(), vect_recog_bool_pattern(), vect_recog_divmod_pattern(), vect_recog_dot_prod_pattern(), vect_recog_mixed_size_cond_pattern(), vect_recog_over_widening_pattern(), vect_recog_rotate_pattern(), vect_recog_vector_vector_shift_pattern(), vect_recog_widen_mult_pattern(), vect_recog_widen_shift_pattern(), vect_recog_widen_sum_pattern(), vect_schedule_slp(), vect_schedule_slp_instance(), vect_slp_analyze_bb(), vect_slp_analyze_bb_1(), vect_slp_analyze_data_ref_dependence(), vect_slp_analyze_data_ref_dependences(), vect_slp_transform_bb(), vect_stmt_relevant_p(), vect_supported_load_permutation_p(), vect_transform_loop(), vect_transform_slp_perm_load(), vect_transform_stmt(), vect_update_inits_of_drs(), vect_update_ivs_after_vectorizer(), vect_update_misalignment_for_peel(), vect_update_slp_costs_according_to_vf(), vect_verify_datarefs_alignment(), vector_alignment_reachable_p(), vectorizable_assignment(), vectorizable_call(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), vectorizable_store(), and vectorize_loops().