本文整理汇总了C++中build2函数的典型用法代码示例。如果您正苦于以下问题:C++ build2函数的具体用法?C++ build2怎么用?C++ build2使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了build2函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的C++代码示例。
示例1: ubsan_maybe_instrument_array_ref
/* Maybe attach a UBSan bounds check to the ARRAY_REF pointed to by
   EXPR_P.  Nothing is done if the reference was already instrumented,
   if there is no current function, or if that function carries the
   "no_sanitize_undefined" attribute.  When IGNORE_OFF_BY_ONE is true
   the instrumentation accepts an index one past the end.  */

void
ubsan_maybe_instrument_array_ref (tree *expr_p, bool ignore_off_by_one)
{
  /* Early exits: instrumentation already present or unwanted.  */
  if (ubsan_array_ref_instrumented_p (*expr_p))
    return;
  if (current_function_decl == NULL_TREE)
    return;
  if (lookup_attribute ("no_sanitize_undefined",
			DECL_ATTRIBUTES (current_function_decl)))
    return;

  tree array = TREE_OPERAND (*expr_p, 0);
  tree index = TREE_OPERAND (*expr_p, 1);
  /* ubsan_instrument_bounds may rewrite INDEX through its address.  */
  tree check = ubsan_instrument_bounds (EXPR_LOCATION (*expr_p), array,
					&index, ignore_off_by_one);
  if (check == NULL_TREE)
    return;

  /* Rebuild the reference with the check sequenced before the
     (possibly rewritten) index: new index is (check, index).  */
  tree instrumented = copy_node (*expr_p);
  TREE_OPERAND (instrumented, 1) = build2 (COMPOUND_EXPR, TREE_TYPE (index),
					   check, index);
  *expr_p = instrumented;
}
开发者ID:Phobos37,项目名称:gcc,代码行数:21,代码来源:c-ubsan.c
示例2: gfc_trans_omp_parallel_do
/* Translate a combined OpenMP PARALLEL DO construct.  The clause list
   attached to CODE is split in two: the loop-scheduling clauses
   (schedule kind, chunk size, ordered, collapse) are moved onto the
   inner DO, while everything else is emitted on the enclosing
   PARALLEL.  Returns the finished statement block.  */

static tree
gfc_trans_omp_parallel_do (gfc_code *code)
{
  stmtblock_t block, *pblock = NULL;
  gfc_omp_clauses parallel_clauses, do_clauses;
  tree stmt, omp_clauses = NULL_TREE;

  gfc_start_block (&block);

  memset (&do_clauses, 0, sizeof (do_clauses));
  if (code->ext.omp_clauses != NULL)
    {
      memcpy (&parallel_clauses, code->ext.omp_clauses,
	      sizeof (parallel_clauses));
      /* Move the scheduling clauses to the DO part and strip them
	 from the PARALLEL part.  */
      do_clauses.sched_kind = parallel_clauses.sched_kind;
      do_clauses.chunk_size = parallel_clauses.chunk_size;
      do_clauses.ordered = parallel_clauses.ordered;
      do_clauses.collapse = parallel_clauses.collapse;
      parallel_clauses.sched_kind = OMP_SCHED_NONE;
      parallel_clauses.chunk_size = NULL;
      parallel_clauses.ordered = false;
      parallel_clauses.collapse = 0;
      omp_clauses = gfc_trans_omp_clauses (&block, &parallel_clauses,
					   code->loc);
    }
  /* No barrier is needed at the end of the DO itself; the end of the
     PARALLEL region supplies one.  */
  do_clauses.nowait = true;
  if (!do_clauses.ordered && do_clauses.sched_kind != OMP_SCHED_STATIC)
    pblock = &block;
  else
    pushlevel (0);
  stmt = gfc_trans_omp_do (code, pblock, &do_clauses, omp_clauses);
  if (TREE_CODE (stmt) != BIND_EXPR)
    stmt = build3_v (BIND_EXPR, NULL, stmt, poplevel (1, 0, 0));
  else
    poplevel (0, 0, 0);
  stmt = build2 (OMP_PARALLEL, void_type_node, stmt, omp_clauses);
  OMP_PARALLEL_COMBINED (stmt) = 1;
  gfc_add_expr_to_block (&block, stmt);
  return gfc_finish_block (&block);
}
开发者ID:Scorpiion,项目名称:Renux_cross_gcc,代码行数:40,代码来源:trans-openmp.c
示例3: putVolatile_builtin
/* Expand the putVolatile builtin for ORIG_CALL: emit a full memory
   barrier followed by a volatile store of the value argument into the
   target object at the given offset, guarded by a `this' check.
   METHOD_RETURN_TYPE is unused.  */
static tree
putVolatile_builtin (tree method_return_type ATTRIBUTE_UNUSED,
		     tree orig_call)
{
  UNMARSHAL4 (orig_call);

  /* Form obj + offset and view it through a pointer to the volatile
     variant of the stored type.  */
  tree volatile_ptr_type
    = build_pointer_type (build_type_variant (value_type, 0, 1));
  tree field_addr
    = fold_convert (volatile_ptr_type,
		    build_addr_sum (value_type, obj_arg, offset_arg));

  /* Fence first, then perform the store itself.  */
  tree barrier
    = build_call_expr (builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE), 0);
  tree store
    = fold_build2 (MODIFY_EXPR, value_type,
		   build_java_indirect_ref (value_type, field_addr,
					    flag_check_references),
		   value_arg);

  tree body = build2 (COMPOUND_EXPR, TREE_TYPE (store), barrier, store);
  return build_check_this (body, this_arg);
}
开发者ID:Nodplus,项目名称:gcc,代码行数:22,代码来源:builtins.c
示例4: thunk_adjust
/* Adjust PTR by FIXED_OFFSET and, when VIRTUAL_OFFSET is non-NULL, by
   the vcall offset loaded from the vtable slot at VIRTUAL_OFFSET.
   When THIS_ADJUSTING is true the constant adjustment happens before
   the virtual one, otherwise after it.  Returns the adjusted
   pointer.  */

static tree
thunk_adjust (tree ptr, bool this_adjusting,
	      HOST_WIDE_INT fixed_offset, tree virtual_offset)
{
  /* `this' thunks apply the constant delta up front.  */
  if (this_adjusting)
    ptr = fold_build2 (PLUS_EXPR, TREE_TYPE (ptr), ptr,
		       ssize_int (fixed_offset));

  if (virtual_offset)
    {
      /* PTR is referenced more than once below, so stabilize it.  */
      ptr = save_expr (ptr);

      /* The vptr sits at offset zero in the object: reinterpret PTR
	 as a pointer to the vtable pointer and load it.  */
      tree vtbl = build1 (NOP_EXPR,
			  build_pointer_type (build_pointer_type
					      (vtable_entry_type)),
			  ptr);
      vtbl = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (vtbl)), vtbl);

      /* Step to the slot holding the vcall offset and fetch it.  */
      vtbl = build2 (PLUS_EXPR, TREE_TYPE (vtbl), vtbl, virtual_offset);
      vtbl = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (vtbl)), vtbl);

      /* Apply the dynamic adjustment to the pointer.  */
      ptr = fold_build2 (PLUS_EXPR, TREE_TYPE (ptr), ptr, vtbl);
    }

  /* Non-`this' thunks apply the constant delta last.  */
  if (!this_adjusting)
    ptr = fold_build2 (PLUS_EXPR, TREE_TYPE (ptr), ptr,
		       ssize_int (fixed_offset));

  return ptr;
}
开发者ID:asdlei00,项目名称:freebsd,代码行数:38,代码来源:method.c
示例5: gfc_trans_omp_parallel_workshare
/* Translate a combined OpenMP PARALLEL WORKSHARE construct for CODE.
   All user clauses go on the PARALLEL; the inner WORKSHARE gets only
   an implicit NOWAIT.  Returns the finished statement block.  */
static tree
gfc_trans_omp_parallel_workshare (gfc_code *code)
{
  stmtblock_t block;
  gfc_omp_clauses workshare_clauses;
  tree body, clauses;

  /* The inner construct carries no clauses of its own except NOWAIT.  */
  memset (&workshare_clauses, 0, sizeof (workshare_clauses));
  workshare_clauses.nowait = true;

  gfc_start_block (&block);
  clauses = gfc_trans_omp_clauses (&block, code->ext.omp_clauses,
				   code->loc);

  pushlevel (0);
  body = gfc_trans_omp_workshare (code, &workshare_clauses);
  if (TREE_CODE (body) == BIND_EXPR)
    poplevel (0, 0, 0);
  else
    body = build3_v (BIND_EXPR, NULL, body, poplevel (1, 0, 0));

  /* Wrap the workshared body in the parallel region and mark it as a
     combined construct.  */
  body = build2 (OMP_PARALLEL, void_type_node, body, clauses);
  OMP_PARALLEL_COMBINED (body) = 1;
  gfc_add_expr_to_block (&block, body);
  return gfc_finish_block (&block);
}
开发者ID:Scorpiion,项目名称:Renux_cross_gcc,代码行数:24,代码来源:trans-openmp.c
示例6: mf_build_check_statement_for
static void
mf_build_check_statement_for (tree base, tree limit,
gimple_stmt_iterator *instr_gsi,
location_t location, tree dirflag)
{
gimple_stmt_iterator gsi;
basic_block cond_bb, then_bb, join_bb;
edge e;
tree cond, t, u, v;
tree mf_base;
tree mf_elem;
tree mf_limit;
gimple g;
gimple_seq seq, stmts;
/* We first need to split the current basic block, and start altering
the CFG. This allows us to insert the statements we're about to
construct into the right basic blocks. */
cond_bb = gimple_bb (gsi_stmt (*instr_gsi));
gsi = *instr_gsi;
gsi_prev (&gsi);
if (! gsi_end_p (gsi))
e = split_block (cond_bb, gsi_stmt (gsi));
else
e = split_block_after_labels (cond_bb);
cond_bb = e->src;
join_bb = e->dest;
/* A recap at this point: join_bb is the basic block at whose head
is the gimple statement for which this check expression is being
built. cond_bb is the (possibly new, synthetic) basic block the
end of which will contain the cache-lookup code, and a
conditional that jumps to the cache-miss code or, much more
likely, over to join_bb. */
/* Create the bb that contains the cache-miss fallback block (mf_check). */
then_bb = create_empty_bb (cond_bb);
make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
make_single_succ_edge (then_bb, join_bb, EDGE_FALLTHRU);
/* Mark the pseudo-fallthrough edge from cond_bb to join_bb. */
e = find_edge (cond_bb, join_bb);
e->flags = EDGE_FALSE_VALUE;
e->count = cond_bb->count;
e->probability = REG_BR_PROB_BASE;
/* Update dominance info. Note that bb_join's data was
updated by split_block. */
if (dom_info_available_p (CDI_DOMINATORS))
{
set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
}
/* Update loop info. */
if (current_loops)
add_bb_to_loop (then_bb, cond_bb->loop_father);
/* Build our local variables. */
mf_elem = create_tmp_reg (mf_cache_structptr_type, "__mf_elem");
mf_base = create_tmp_reg (mf_uintptr_type, "__mf_base");
mf_limit = create_tmp_reg (mf_uintptr_type, "__mf_limit");
/* Build: __mf_base = (uintptr_t) <base address expression>. */
seq = NULL;
t = fold_convert_loc (location, mf_uintptr_type,
unshare_expr (base));
t = force_gimple_operand (t, &stmts, false, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
g = gimple_build_assign (mf_base, t);
gimple_set_location (g, location);
gimple_seq_add_stmt (&seq, g);
/* Build: __mf_limit = (uintptr_t) <limit address expression>. */
t = fold_convert_loc (location, mf_uintptr_type,
unshare_expr (limit));
t = force_gimple_operand (t, &stmts, false, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
g = gimple_build_assign (mf_limit, t);
gimple_set_location (g, location);
gimple_seq_add_stmt (&seq, g);
/* Build: __mf_elem = &__mf_lookup_cache [(__mf_base >> __mf_shift)
& __mf_mask]. */
t = build2 (RSHIFT_EXPR, mf_uintptr_type, mf_base,
flag_mudflap_threads ? mf_cache_shift_decl
: mf_cache_shift_decl_l);
t = build2 (BIT_AND_EXPR, mf_uintptr_type, t,
flag_mudflap_threads ? mf_cache_mask_decl
: mf_cache_mask_decl_l);
t = build4 (ARRAY_REF,
TREE_TYPE (TREE_TYPE (mf_cache_array_decl)),
mf_cache_array_decl, t, NULL_TREE, NULL_TREE);
t = build1 (ADDR_EXPR, mf_cache_structptr_type, t);
t = force_gimple_operand (t, &stmts, false, NULL_TREE);
gimple_seq_add_seq (&seq, stmts);
g = gimple_build_assign (mf_elem, t);
gimple_set_location (g, location);
gimple_seq_add_stmt (&seq, g);
//.........这里部分代码省略.........
开发者ID:ChaosJohn,项目名称:gcc,代码行数:101,代码来源:tree-mudflap.c
示例7: build_throw
//.........这里部分代码省略.........
exp_vec = make_tree_vector_single (moved);
moved = (build_special_member_call
(object, complete_ctor_identifier, &exp_vec,
TREE_TYPE (object), flags|LOOKUP_PREFER_RVALUE,
tf_none));
release_tree_vector (exp_vec);
if (moved != error_mark_node)
{
exp = moved;
converted = true;
}
}
/* Call the copy constructor. */
if (!converted)
{
exp_vec = make_tree_vector_single (exp);
exp = (build_special_member_call
(object, complete_ctor_identifier, &exp_vec,
TREE_TYPE (object), flags, tf_warning_or_error));
release_tree_vector (exp_vec);
}
if (exp == error_mark_node)
{
error (" in thrown expression");
return error_mark_node;
}
}
else
{
tmp = decay_conversion (exp, tf_warning_or_error);
if (tmp == error_mark_node)
return error_mark_node;
exp = build2 (INIT_EXPR, temp_type, object, tmp);
}
/* Mark any cleanups from the initialization as MUST_NOT_THROW, since
they are run after the exception object is initialized. */
cp_walk_tree_without_duplicates (&exp, wrap_cleanups_r, 0);
/* Prepend the allocation. */
exp = build2 (COMPOUND_EXPR, TREE_TYPE (exp), allocate_expr, exp);
/* Force all the cleanups to be evaluated here so that we don't have
to do them during unwinding. */
exp = build1 (CLEANUP_POINT_EXPR, void_type_node, exp);
throw_type = build_eh_type_type (prepare_eh_type (TREE_TYPE (object)));
cleanup = NULL_TREE;
if (type_build_dtor_call (TREE_TYPE (object)))
{
tree dtor_fn = lookup_fnfields (TYPE_BINFO (TREE_TYPE (object)),
complete_dtor_identifier, 0);
dtor_fn = BASELINK_FUNCTIONS (dtor_fn);
mark_used (dtor_fn);
if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TREE_TYPE (object)))
{
cxx_mark_addressable (dtor_fn);
/* Pretend it's a normal function. */
cleanup = build1 (ADDR_EXPR, cleanup_type, dtor_fn);
}
}
if (cleanup == NULL_TREE)
cleanup = build_int_cst (cleanup_type, 0);
/* ??? Indicate that this function call throws throw_type. */
tmp = cp_build_function_call_nary (throw_fn, tf_warning_or_error,
ptr, throw_type, cleanup, NULL_TREE);
/* Tack on the initialization stuff. */
exp = build2 (COMPOUND_EXPR, TREE_TYPE (tmp), exp, tmp);
}
else
{
/* Rethrow current exception. */
if (!rethrow_fn)
{
tree name = get_identifier ("__cxa_rethrow");
rethrow_fn = get_global_binding (name);
if (!rethrow_fn)
/* Declare void __cxa_rethrow (void). */
rethrow_fn = push_throw_library_fn
(name, build_function_type_list (void_type_node, NULL_TREE));
if (flag_tm)
apply_tm_attr (rethrow_fn, get_identifier ("transaction_pure"));
}
/* ??? Indicate that this function call allows exceptions of the type
of the enclosing catch block (if known). */
exp = cp_build_function_call_vec (rethrow_fn, NULL, tf_warning_or_error);
}
exp = build1 (THROW_EXPR, void_type_node, exp);
SET_EXPR_LOCATION (exp, input_location);
return exp;
}
开发者ID:kusumi,项目名称:DragonFlyBSD,代码行数:101,代码来源:except.c
示例8: aarch64_atomic_assign_expand_fenv
/* Build the three statement sequences used around a C11 atomic
   compound assignment on floating-point operands (the
   TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook).  On return:
     *HOLD   - saves FPCR and FPSR into temporaries and writes masked
	       copies back, disabling FP exception traps and clearing
	       the accrued exception flags;
     *CLEAR  - re-writes the masked FPSR to clear any flags raised in
	       the meantime;
     *UPDATE - captures the flags raised during the operation,
	       restores the saved FPSR, and re-raises those flags via
	       __atomic_feraiseexcept.  */
void
aarch64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  /* FP exception flag bits as laid out in FPSR; the shift converts a
     flag position into the matching trap-enable position in FPCR.  */
  const unsigned AARCH64_FE_INVALID = 1;
  const unsigned AARCH64_FE_DIVBYZERO = 2;
  const unsigned AARCH64_FE_OVERFLOW = 4;
  const unsigned AARCH64_FE_UNDERFLOW = 8;
  const unsigned AARCH64_FE_INEXACT = 16;
  const unsigned HOST_WIDE_INT AARCH64_FE_ALL_EXCEPT = (AARCH64_FE_INVALID
							| AARCH64_FE_DIVBYZERO
							| AARCH64_FE_OVERFLOW
							| AARCH64_FE_UNDERFLOW
							| AARCH64_FE_INEXACT);
  const unsigned HOST_WIDE_INT AARCH64_FE_EXCEPT_SHIFT = 8;
  tree fenv_cr, fenv_sr, get_fpcr, set_fpcr, mask_cr, mask_sr;
  tree ld_fenv_cr, ld_fenv_sr, masked_fenv_cr, masked_fenv_sr, hold_fnclex_cr;
  tree hold_fnclex_sr, new_fenv_var, reload_fenv, restore_fnenv, get_fpsr, set_fpsr;
  tree update_call, atomic_feraiseexcept, hold_fnclex, masked_fenv, ld_fenv;

  /* Generate the equivalent of:
       unsigned int fenv_cr;
       fenv_cr = __builtin_aarch64_get_fpcr ();
       unsigned int fenv_sr;
       fenv_sr = __builtin_aarch64_get_fpsr ();

     Now set all exceptions to non-stop:
       unsigned int mask_cr
	 = ~(AARCH64_FE_ALL_EXCEPT << AARCH64_FE_EXCEPT_SHIFT);
       unsigned int masked_cr;
       masked_cr = fenv_cr & mask_cr;

     And clear all exception flags:
       unsigned int mask_sr = ~AARCH64_FE_ALL_EXCEPT;
       unsigned int masked_sr;
       masked_sr = fenv_sr & mask_sr;

       __builtin_aarch64_set_fpcr (masked_cr);
       __builtin_aarch64_set_fpsr (masked_sr);  */

  fenv_cr = create_tmp_var (unsigned_type_node, NULL);
  fenv_sr = create_tmp_var (unsigned_type_node, NULL);

  get_fpcr = aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPCR];
  set_fpcr = aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPCR];
  get_fpsr = aarch64_builtin_decls[AARCH64_BUILTIN_GET_FPSR];
  set_fpsr = aarch64_builtin_decls[AARCH64_BUILTIN_SET_FPSR];

  mask_cr = build_int_cst (unsigned_type_node,
			   ~(AARCH64_FE_ALL_EXCEPT << AARCH64_FE_EXCEPT_SHIFT));
  mask_sr = build_int_cst (unsigned_type_node,
			   ~(AARCH64_FE_ALL_EXCEPT));

  /* Load both registers into the temporaries.  */
  ld_fenv_cr = build2 (MODIFY_EXPR, unsigned_type_node,
		       fenv_cr, build_call_expr (get_fpcr, 0));
  ld_fenv_sr = build2 (MODIFY_EXPR, unsigned_type_node,
		       fenv_sr, build_call_expr (get_fpsr, 0));

  /* Mask out the trap-enable bits (FPCR) and the flag bits (FPSR).  */
  masked_fenv_cr = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_cr, mask_cr);
  masked_fenv_sr = build2 (BIT_AND_EXPR, unsigned_type_node, fenv_sr, mask_sr);

  /* Write the masked values back.  */
  hold_fnclex_cr = build_call_expr (set_fpcr, 1, masked_fenv_cr);
  hold_fnclex_sr = build_call_expr (set_fpsr, 1, masked_fenv_sr);

  /* Chain the pieces: compute masks, load, then store masked values.  */
  hold_fnclex = build2 (COMPOUND_EXPR, void_type_node, hold_fnclex_cr,
			hold_fnclex_sr);
  masked_fenv = build2 (COMPOUND_EXPR, void_type_node, masked_fenv_cr,
			masked_fenv_sr);
  ld_fenv = build2 (COMPOUND_EXPR, void_type_node, ld_fenv_cr, ld_fenv_sr);

  *hold = build2 (COMPOUND_EXPR, void_type_node,
		  build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv),
		  hold_fnclex);

  /* Store the value of masked_fenv to clear the exceptions:
       __builtin_aarch64_set_fpsr (masked_fenv_sr);  */

  *clear = build_call_expr (set_fpsr, 1, masked_fenv_sr);

  /* Generate the equivalent of:
       unsigned int new_fenv_var;
       new_fenv_var = __builtin_aarch64_get_fpsr ();

       __builtin_aarch64_set_fpsr (fenv_sr);

       __atomic_feraiseexcept (new_fenv_var);  */

  new_fenv_var = create_tmp_var (unsigned_type_node, NULL);
  reload_fenv = build2 (MODIFY_EXPR, unsigned_type_node,
			new_fenv_var, build_call_expr (get_fpsr, 0));
  restore_fnenv = build_call_expr (set_fpsr, 1, fenv_sr);
  atomic_feraiseexcept = builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT);
  update_call = build_call_expr (atomic_feraiseexcept, 1,
				 fold_convert (integer_type_node, new_fenv_var));
  *update = build2 (COMPOUND_EXPR, void_type_node,
		    build2 (COMPOUND_EXPR, void_type_node,
			    reload_fenv, restore_fnenv), update_call);
}
开发者ID:rockflying,项目名称:gcc,代码行数:98,代码来源:aarch64-builtins.c
示例9: do_jump
//.........这里部分代码省略.........
&& TREE_OPERAND (exp0, 0) != error_mark_node
&& TYPE_PRECISION (TREE_TYPE (exp0))
<= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp0, 0))))
exp0 = TREE_OPERAND (exp0, 0);
/* "exp0 ^ 1" inverts the sense of the single bit test. */
if (TREE_CODE (exp0) == BIT_XOR_EXPR
&& integer_onep (TREE_OPERAND (exp0, 1)))
{
exp0 = TREE_OPERAND (exp0, 0);
clr_label = if_true_label;
set_label = if_false_label;
setclr_prob = inv (prob);
}
else
{
clr_label = if_false_label;
set_label = if_true_label;
}
if (TREE_CODE (exp0) == RSHIFT_EXPR)
{
tree arg = TREE_OPERAND (exp0, 0);
tree shift = TREE_OPERAND (exp0, 1);
tree argtype = TREE_TYPE (arg);
if (TREE_CODE (shift) == INTEGER_CST
&& compare_tree_int (shift, 0) >= 0
&& compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0
&& prefer_and_bit_test (TYPE_MODE (argtype),
TREE_INT_CST_LOW (shift)))
{
unsigned HOST_WIDE_INT mask
= (unsigned HOST_WIDE_INT) 1 << TREE_INT_CST_LOW (shift);
do_jump (build2 (BIT_AND_EXPR, argtype, arg,
build_int_cstu (argtype, mask)),
clr_label, set_label, setclr_prob);
break;
}
}
}
/* If we are AND'ing with a small constant, do this comparison in the
smallest type that fits. If the machine doesn't have comparisons
that small, it will be converted back to the wider comparison.
This helps if we are testing the sign bit of a narrower object.
combine can't do this for us because it can't know whether a
ZERO_EXTRACT or a compare in a smaller mode exists, but we do. */
if (! SLOW_BYTE_ACCESS
&& TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST
&& TYPE_PRECISION (TREE_TYPE (exp)) <= HOST_BITS_PER_WIDE_INT
&& (i = tree_floor_log2 (TREE_OPERAND (exp, 1))) >= 0
&& (mode = mode_for_size (i + 1, MODE_INT, 0)) != BLKmode
&& (type = lang_hooks.types.type_for_mode (mode, 1)) != 0
&& TYPE_PRECISION (type) < TYPE_PRECISION (TREE_TYPE (exp))
&& have_insn_for (COMPARE, TYPE_MODE (type)))
{
do_jump (fold_convert (type, exp), if_false_label, if_true_label,
prob);
break;
}
if (TYPE_PRECISION (TREE_TYPE (exp)) > 1
|| TREE_CODE (TREE_OPERAND (exp, 1)) == INTEGER_CST)
goto normal;
开发者ID:FilipinOTech,项目名称:gcc,代码行数:66,代码来源:dojump.c
示例10: build_array_notation_expr
//.........这里部分代码省略.........
{
lhs_an_loop_info[ii].var = create_tmp_var (integer_type_node);
lhs_an_loop_info[ii].ind_init = build_modify_expr
(location, lhs_an_loop_info[ii].var,
TREE_TYPE (lhs_an_loop_info[ii].var), NOP_EXPR,
location, build_zero_cst (TREE_TYPE (lhs_an_loop_info[ii].var)),
TREE_TYPE (lhs_an_loop_info[ii].var));
}
for (ii = 0; ii < rhs_rank; ii++)
{
/* When we have a polynomial, we assume that the indices are of type
integer. */
rhs_an_loop_info[ii].var = create_tmp_var (integer_type_node);
rhs_an_loop_info[ii].ind_init = build_modify_expr
(location, rhs_an_loop_info[ii].var,
TREE_TYPE (rhs_an_loop_info[ii].var), NOP_EXPR,
location, build_int_cst (TREE_TYPE (rhs_an_loop_info[ii].var), 0),
TREE_TYPE (rhs_an_loop_info[ii].var));
}
if (lhs_rank)
{
lhs_array_operand = create_array_refs
(location, lhs_an_info, lhs_an_loop_info, lhs_list_size, lhs_rank);
replace_array_notations (&lhs, true, lhs_list, lhs_array_operand);
array_expr_lhs = lhs;
}
if (rhs_array_operand)
vec_safe_truncate (rhs_array_operand, 0);
if (rhs_rank)
{
rhs_array_operand = create_array_refs
(location, rhs_an_info, rhs_an_loop_info, rhs_list_size, rhs_rank);
replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
vec_safe_truncate (rhs_array_operand, 0);
rhs_array_operand = fix_sec_implicit_args (location, rhs_list,
rhs_an_loop_info, rhs_rank,
rhs);
if (!rhs_array_operand)
return error_mark_node;
replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
}
else if (rhs_list_size > 0)
{
rhs_array_operand = fix_sec_implicit_args (location, rhs_list,
lhs_an_loop_info, lhs_rank,
lhs);
if (!rhs_array_operand)
return error_mark_node;
replace_array_notations (&rhs, true, rhs_list, rhs_array_operand);
}
array_expr_lhs = lhs;
array_expr_rhs = rhs;
array_expr = build_modify_expr (location, array_expr_lhs, lhs_origtype,
modifycode, rhs_loc, array_expr_rhs,
rhs_origtype);
create_cmp_incr (location, &lhs_an_loop_info, lhs_rank, lhs_an_info);
if (rhs_rank)
create_cmp_incr (location, &rhs_an_loop_info, rhs_rank, rhs_an_info);
for (ii = 0; ii < MAX (lhs_rank, rhs_rank); ii++)
if (ii < lhs_rank && ii < rhs_rank)
cond_expr[ii] = build2 (TRUTH_ANDIF_EXPR, boolean_type_node,
lhs_an_loop_info[ii].cmp,
rhs_an_loop_info[ii].cmp);
else if (ii < lhs_rank && ii >= rhs_rank)
cond_expr[ii] = lhs_an_loop_info[ii].cmp;
else
gcc_unreachable ();
an_init = pop_stmt_list (an_init);
append_to_statement_list_force (an_init, &loop_with_init);
body = array_expr;
for (ii = 0; ii < MAX (lhs_rank, rhs_rank); ii++)
{
tree incr_list = alloc_stmt_list ();
tree new_loop = push_stmt_list ();
if (lhs_rank)
add_stmt (lhs_an_loop_info[ii].ind_init);
if (rhs_rank)
add_stmt (rhs_an_loop_info[ii].ind_init);
if (lhs_rank)
append_to_statement_list_force (lhs_an_loop_info[ii].incr, &incr_list);
if (rhs_rank && rhs_an_loop_info[ii].incr)
append_to_statement_list_force (rhs_an_loop_info[ii].incr, &incr_list);
c_finish_loop (location, cond_expr[ii], incr_list, body, NULL_TREE,
NULL_TREE, true);
body = pop_stmt_list (new_loop);
}
append_to_statement_list_force (body, &loop_with_init);
lhs_an_info.release ();
lhs_an_loop_info.release ();
if (rhs_rank)
{
rhs_an_info.release ();
rhs_an_loop_info.release ();
}
cond_expr.release ();
return loop_with_init;
}
开发者ID:AHelper,项目名称:gcc,代码行数:101,代码来源:c-array-notation.c
示例11: fix_builtin_array_notation_fn
//.........这里部分代码省略.........
array_op0 = TREE_OPERAND (array_op0, 0);
switch (an_type)
{
case BUILT_IN_CILKPLUS_SEC_REDUCE_ADD:
new_var_init = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
location, build_zero_cst (new_var_type), new_var_type);
new_expr = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), PLUS_EXPR,
location, func_parm, TREE_TYPE (func_parm));
break;
case BUILT_IN_CILKPLUS_SEC_REDUCE_MUL:
new_var_init = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
location, build_one_cst (new_var_type), new_var_type);
new_expr = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), MULT_EXPR,
location, func_parm, TREE_TYPE (func_parm));
break;
case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_ZERO:
new_var_init = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
location, build_one_cst (new_var_type), new_var_type);
/* Initially you assume everything is zero, now if we find a case where
it is NOT true, then we set the result to false. Otherwise
we just keep the previous value. */
new_yes_expr = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
location, build_zero_cst (TREE_TYPE (*new_var)),
TREE_TYPE (*new_var));
new_no_expr = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
location, *new_var, TREE_TYPE (*new_var));
new_cond_expr = build2 (NE_EXPR, TREE_TYPE (func_parm), func_parm,
build_zero_cst (TREE_TYPE (func_parm)));
new_expr = build_conditional_expr
(location, new_cond_expr, false, new_yes_expr,
TREE_TYPE (new_yes_expr), new_no_expr, TREE_TYPE (new_no_expr));
break;
case BUILT_IN_CILKPLUS_SEC_REDUCE_ALL_NONZERO:
new_var_init = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
location, build_one_cst (new_var_type), new_var_type);
/* Initially you assume everything is non-zero, now if we find a case
where it is NOT true, then we set the result to false. Otherwise
we just keep the previous value. */
new_yes_expr = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
location, build_zero_cst (TREE_TYPE (*new_var)),
TREE_TYPE (*new_var));
new_no_expr = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
location, *new_var, TREE_TYPE (*new_var));
new_cond_expr = build2 (EQ_EXPR, TREE_TYPE (func_parm), func_parm,
build_zero_cst (TREE_TYPE (func_parm)));
new_expr = build_conditional_expr
(location, new_cond_expr, false, new_yes_expr,
TREE_TYPE (new_yes_expr), new_no_expr, TREE_TYPE (new_no_expr));
break;
case BUILT_IN_CILKPLUS_SEC_REDUCE_ANY_ZERO:
new_var_init = build_modify_expr
(location, *new_var, TREE_TYPE (*new_var), NOP_EXPR,
location, build_zero_cst (new_var_type), new_var_type);
/* Initially we assume there are NO zeros in the list. When we find
a non-zero, we keep the previous value. If we find a zero, we
set the value to true. */
开发者ID:AHelper,项目名称:gcc,代码行数:67,代码来源:c-array-notation.c
示例12: expand_builtin_cilk_detach
/* Expand the Cilk detach builtin call EXP.  Publishes the current
   frame on its worker's deque: stores the frame's parent pointer
   through the worker's tail pointer, then advances the tail (the
   advance is emitted with release semantics when the target provides
   a sync_lock_release pattern), and finally ORs CILK_FRAME_DETACHED
   into the frame's flags.  Emits nothing if EXP carries no frame
   argument.  */
void
expand_builtin_cilk_detach (tree exp)
{
  rtx_insn *insn;
  tree fptr = get_frame_arg (exp);

  if (fptr == NULL_TREE)
    return;

  /* Trees for the frame's parent/worker fields and the worker's
     tail pointer.  */
  tree parent = cilk_dot (fptr, CILK_TI_FRAME_PARENT, 0);
  tree worker = cilk_dot (fptr, CILK_TI_FRAME_WORKER, 0);
  tree tail = cilk_arrow (worker, CILK_TI_WORKER_TAIL, 1);

  rtx wreg = expand_expr (worker, NULL_RTX, Pmode, EXPAND_NORMAL);
  if (GET_CODE (wreg) != REG)
    wreg = copy_to_reg (wreg);
  rtx preg = expand_expr (parent, NULL_RTX, Pmode, EXPAND_NORMAL);

  /* TMP <- WORKER.TAIL
     *TMP <- PARENT
     TMP <- TMP + 1
     WORKER.TAIL <- TMP  */

  /* Byte offset of the tail field within the worker structure.  */
  HOST_WIDE_INT worker_tail_offset =
    tree_to_shwi (DECL_FIELD_OFFSET (cilk_trees[CILK_TI_WORKER_TAIL])) +
    tree_to_shwi (DECL_FIELD_BIT_OFFSET (cilk_trees[CILK_TI_WORKER_TAIL])) /
    BITS_PER_UNIT;
  rtx tmem0 = gen_rtx_MEM (Pmode,
			   plus_constant (Pmode, wreg, worker_tail_offset));
  set_mem_attributes (tmem0, tail, 0);
  MEM_NOTRAP_P (tmem0) = 1;
  gcc_assert (MEM_VOLATILE_P (tmem0));
  rtx treg = copy_to_mode_reg (Pmode, tmem0);
  rtx tmem1 = gen_rtx_MEM (Pmode, treg);
  set_mem_attributes (tmem1, TREE_TYPE (TREE_TYPE (tail)), 0);
  MEM_NOTRAP_P (tmem1) = 1;
  emit_move_insn (tmem1, preg);
  emit_move_insn (treg, plus_constant (Pmode, treg, GET_MODE_SIZE (Pmode)));

  /* There is a release barrier (st8.rel, membar #StoreStore,
     sfence, lwsync, etc.) between the two stores.  On x86
     normal volatile stores have proper semantics; the sfence
     would only be needed for nontemporal stores (which we
     could generate using the storent optab, for no benefit
     in this case).

     The predicate may return false even for a REG if this is
     the limited release operation that only stores 0.  */
  enum insn_code icode = direct_optab_handler (sync_lock_release_optab, Pmode);
  if (icode != CODE_FOR_nothing
      && insn_data[icode].operand[1].predicate (treg, Pmode)
      && (insn = GEN_FCN (icode) (tmem0, treg)) != NULL_RTX)
    emit_insn (insn);
  else
    emit_move_insn (tmem0, treg);

  /* The memory barrier inserted above should not prevent
     the load of flags from being moved before the stores,
     but in practice it does because it is implemented with
     unspec_volatile.  In-order RISC machines should
     explicitly load flags earlier.  */

  tree flags = cilk_dot (fptr, CILK_TI_FRAME_FLAGS, 0);
  expand_expr (build2 (MODIFY_EXPR, void_type_node, flags,
		       build2 (BIT_IOR_EXPR, TREE_TYPE (flags), flags,
			       build_int_cst (TREE_TYPE (flags),
					      CILK_FRAME_DETACHED))),
	       const0_rtx, VOIDmode, EXPAND_NORMAL);
}
开发者ID:krichter722,项目名称:gcc,代码行数:69,代码来源:cilk-common.c
示例13: c_finish_omp_for
//.........这里部分代码省略.........
{
error_at (elocus, "invalid controlling predicate");
fail = true;
}
}
if (incr == NULL_TREE)
{
error_at (elocus, "missing increment expression");
fail = true;
}
else
{
bool incr_ok = false;
if (EXPR_HAS_LOCATION (incr))
elocus = EXPR_LOCATION (incr);
/* Check all the valid increment expressions: v++, v--, ++v, --v,
v = v + incr, v = incr + v and v = v - incr. */
switch (TREE_CODE (incr))
{
case POSTINCREMENT_EXPR:
case PREINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREDECREMENT_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
incr_ok = true;
incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
break;
case COMPOUND_EXPR:
if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
|| TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
break;
incr = TREE_OPERAND (incr, 1);
/* FALLTHRU */
case MODIFY_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
if (TREE_OPERAND (incr, 1) == decl)
break;
if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
&& (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
|| TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
incr_ok = true;
else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
|| (TREE_CODE (TREE_OPERAND (incr, 1))
== POINTER_PLUS_EXPR))
&& TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
incr_ok = true;
else
{
tree t = check_omp_for_incr_expr (elocus,
TREE_OPERAND (incr, 1),
decl);
if (t != error_mark_node)
{
incr_ok = true;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
}
}
break;
default:
break;
}
if (!incr_ok)
{
error_at (elocus, "invalid increment expression");
fail = true;
}
}
TREE_VEC_ELT (initv, i) = init;
TREE_VEC_ELT (incrv, i) = incr;
}
if (fail)
return NULL;
else
{
tree t = make_node (code);
TREE_TYPE (t) = void_type_node;
OMP_FOR_INIT (t) = initv;
OMP_FOR_COND (t) = condv;
OMP_FOR_INCR (t) = incrv;
OMP_FOR_BODY (t) = body;
OMP_FOR_PRE_BODY (t) = pre_body;
if (code == OMP_FOR)
OMP_FOR_ORIG_DECLS (t) = orig_declv;
SET_EXPR_LOCATION (t, locus);
return add_stmt (t);
}
}
开发者ID:ymgcmstk,项目名称:gcc,代码行数:101,代码来源:c-omp.c
示例14: c_finish_omp_atomic
tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
enum tree_code opcode, tree lhs, tree rhs,
tree v, tree lhs1, tree rhs1, bool swapped, bool seq_cst)
{
tree x, type, addr, pre = NULL_TREE;
if (lhs == error_mark_node || rhs == error_mark_node
|| v == error_mark_node || lhs1 == error_mark_node
|| rhs1 == error_mark_node)
return error_mark_node;
/* ??? According to one reading of the OpenMP spec, complex type are
supported, but there are no atomic stores for any architecture.
But at least icc 9.0 doesn't support complex types here either.
And lets not even talk about vector types... */
type = TREE_TYPE (lhs);
if (!INTEGRAL_TYPE_P (type)
&& !POINTER_TYPE_P (type)
&& !SCALAR_FLOAT_TYPE_P (type))
{
error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
return error_mark_node;
}
if (opcode == RDIV_EXPR)
opcode = TRUNC_DIV_EXPR;
/* ??? Validate that rhs does not overlap lhs. */
/* Take and save the address of the lhs. From then on we'll reference it
via indirection. */
addr = build_unary_op (loc, ADDR_EXPR, lhs, 0);
if (addr == error_mark_node)
return error_mark_node;
addr = save_expr (addr);
if (TREE_CODE (addr) != SAVE_EXPR
&& (TREE_CODE (addr) != ADDR_EXPR
|| !VAR_P (TREE_OPERAND (addr, 0))))
{
/* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
it even after unsharing function body. */
tree var = create_tmp_var_raw (TREE_TYPE (addr));
DECL_CONTEXT (var) = current_function_decl;
addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
}
lhs = build_indirect_ref (loc, addr, RO_NULL);
if (code == OMP_ATOMIC_READ)
{
x = build1 (OMP_ATOMIC_READ, type, addr);
SET_EXPR_LOCATION (x, loc);
OMP_ATOMIC_SEQ_CST (x) = seq_cst;
return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
loc, x, NULL_TREE);
}
/* There are lots of warnings, errors, and conversions that need to happen
in the course of interpreting a statement. Use the normal mechanisms
to do this, and then take it apart again. */
if (swapped)
{
rhs = build_binary_op (loc, opcode, rhs, lhs, 1);
opcode = NOP_EXPR;
}
bool save = in_late_binary_op;
in_late_binary_op = true;
x = build_modify_expr (loc, lhs, NULL_TREE, opcode, loc, rhs, NULL_TREE);
in_late_binary_op = save;
if (x == error_mark_node)
return error_mark_node;
if (TREE_CODE (x) == COMPOUND_EXPR)
{
pre = TREE_OPERAND (x, 0);
gcc_assert (TREE_CODE (pre) == SAVE_EXPR);
x = TREE_OPERAND (x, 1);
}
gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
rhs = TREE_OPERAND (x, 1);
/* Punt the actual generation of atomic operations to common code. */
if (code == OMP_ATOMIC)
type = void_type_node;
x = build2 (code, type, addr, rhs);
SET_EXPR_LOCATION (x, loc);
OMP_ATOMIC_SEQ_CST (x) = seq_cst;
/* Generally it is hard to prove lhs1 and lhs are the same memory
location, just diagnose different variables. */
if (rhs1
&& VAR_P (rhs1)
&& VAR_P (lhs)
&& rhs1 != lhs)
{
if (code == OMP_ATOMIC)
error_at (loc, "%<#pragma omp atomic update%> uses two different variables for memory");
else
error_at (loc, "%<#pragma omp atomic capture%> uses two different variables for memory");
return error_mark_node;
}
//.........这里部分代码省略.........
开发者ID:ymgcmstk,项目名称:gcc,代码行数:101,代码来源:c-omp.c
示例15: c_finish_omp_for
//.........这里部分代码省略.........
else if (TREE_CODE (op1) == NOP_EXPR
&& decl == TREE_OPERAND (op1, 0))
{
TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
TREE_OPERAND (cond, 0) = fold_build1 (NOP_EXPR, TREE_TYPE (decl),
TREE_OPERAND (cond, 0));
}
if (decl == TREE_OPERAND (cond, 0))
cond_ok = true;
else if (decl == TREE_OPERAND (cond, 1))
{
TREE_SET_CODE (cond, swap_tree_comparison (TREE_CODE (cond)));
TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
TREE_OPERAND (cond, 0) = decl;
cond_ok = true;
}
}
if (!cond_ok)
{
error ("%Hinvalid controlling predicate", &elocus);
fail = true;
}
}
if (incr == NULL_TREE)
{
error ("%Hmissing increment expression", &elocus);
fail = true;
}
else
{
bool incr_ok = false;
if (EXPR_HAS_LOCATION (incr))
elocus = EXPR_LOCATION (incr);
/* Check all the valid increment expressions: v++, v--, ++v, --v,
v = v + incr, v = incr + v and v = v - incr. */
switch (TREE_CODE (incr))
{
case POSTINCREMENT_EXPR:
case PREINCREMENT_EXPR:
case POSTDECREMENT_EXPR:
case PREDECREMENT_EXPR:
incr_ok = (TREE_OPERAND (incr, 0) == decl);
break;
case MODIFY_EXPR:
if (TREE_OPERAND (incr, 0) != decl)
break;
if (TREE_OPERAND (incr, 1) == decl)
break;
if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
&& (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
|| TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
incr_ok = true;
else if (TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
&& TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
incr_ok = true;
else
{
tree t = check_omp_for_incr_expr (TREE_OPERAND (incr, 1), decl);
if (t != error_mark_node)
{
incr_ok = true;
t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
}
}
break;
default:
break;
}
if (!incr_ok)
{
error ("%Hinvalid increment expression", &elocus);
fail = true;
}
}
if (fail)
return NULL;
else
{
tree t = make_node (OMP_FOR);
TREE_TYPE (t) = void_type_node;
OMP_FOR_INIT (t) = init;
OMP_FOR_COND (t) = cond;
OMP_FOR_INCR (t) = incr;
OMP_FOR_BODY (t) = body;
OMP_FOR_PRE_BODY (t) = pre_body;
SET_EXPR_LOCATION (t, locus);
return add_stmt (t);
}
}
开发者ID:austinsc,项目名称:GCCXML,代码行数:101,代码来源:c-omp.c
示例16: do_jump
void
do_jump (tree exp, rtx if_false_label, rtx if_true_label)
{
enum tree_code code = TREE_CODE (exp);
rtx temp;
int i;
tree type;
enum machine_mode mode;
rtx drop_through_label = 0;
switch (code)
{
case ERROR_MARK:
break;
case INTEGER_CST:
temp = integer_zerop (exp) ? if_false_label : if_true_label;
if (temp)
emit_jump (temp);
break;
#if 0
/* This is not true with #pragma weak */
case ADDR_EXPR:
/* The address of something can never be zero. */
if (if_true_label)
emit_jump (if_true_label);
break;
#endif
case NOP_EXPR:
if (TREE_CODE (TREE_OPERAND (exp, 0)) == COMPONENT_REF
|| TREE_CODE (TREE_OPERAND (exp, 0)) == BIT_FIELD_REF
|| TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_REF
|| TREE_CODE (TREE_OPERAND (exp, 0)) == ARRAY_RANGE_REF)
goto normal;
case CONVERT_EXPR:
/* If we are narrowing the operand, we have to do the compare in the
narrower mode. */
if ((TYPE_PRECISION (TREE_TYPE (exp))
< TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp, 0)))))
goto normal;
case NON_LVALUE_EXPR:
case ABS_EXPR:
case NEGATE_EXPR:
case LROTATE_EXPR:
case RROTATE_EXPR:
/* These cannot change zero->nonzero or vice versa. */
do_jump (TREE_OPERAND (exp, 0), if_false_label, if_true_label);
break;
case BIT_AND_EXPR:
/* fold_single_bit_test() converts (X & (1 << C)) into (X >> C) & 1.
See if the former is preferred for jump tests and restore it
if so. */
if (integer_onep (TREE_OPERAND (exp, 1)))
{
tree exp0 = TREE_OPERAND (exp, 0);
rtx set_label, clr_label;
/* Strip narrowing integral type conversions. */
while ((TREE_CODE (exp0) == NOP_EXPR
|| TREE_CODE (exp0) == CONVERT_EXPR
|| TREE_CODE (exp0) == NON_LVALUE_EXPR)
&& TREE_OPERAND (exp0, 0) != error_mark_node
&& TYPE_PRECISION (TREE_TYPE (exp0))
<= TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (exp0, 0))))
exp0 = TREE_OPERAND (exp0, 0);
/* "exp0 ^ 1" inverts the sense of the single bit test. */
if (TREE_CODE (exp0) == BIT_XOR_EXPR
&& integer_onep (TREE_OPERAND (exp0, 1)))
{
exp0 = TREE_OPERAND (exp0, 0);
clr_label = if_true_label;
set_label = if_false_label;
}
else
{
clr_label = if_false_label;
set_label = if_true_label;
}
if (TREE_CODE (exp0) == RSHIFT_EXPR)
{
tree arg = TREE_OPERAND (exp0, 0);
tree shift = TREE_OPERAND (exp0, 1);
tree argtype = TREE_TYPE (arg);
if (TREE_CODE (shift) == INTEGER_CST
&& compare_tree_int (shift, 0) >= 0
&& compare_tree_int (shift, HOST_BITS_PER_WIDE_INT) < 0
&& prefer_and_bit_test (TYPE_MODE (argtype),
TREE_INT_CST_LOW (shift)))
{
HOST_WIDE_INT mask = (HOST_WIDE_INT) 1
<< TREE_INT_CST_LOW (shift);
do_jump (build2 (BIT_AND_EXPR, argtype, arg,
build_int_cst_type (argtype, mask)),
clr_label, set_label);
break;
//.........这里部分代码省略.........
开发者ID:Abioy,项目名称:gccxml,代码行数:101,代码来源:dojump.c
|
请发表评论