8293050: RISC-V: Remove redundant non-null assertions about macro-assembler #10079

Closed · 2 commits
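The change is mechanical: each deleted `assert_cond(masm != NULL)` (or `_masm != NULL`) sat directly in front of an unconditional dereference of the same pointer, so the assertion could never catch anything a crash would not. A self-contained sketch of the before/after shape, modeled on the `pass_arg0` helpers in this diff (the mock `MacroAssembler` and register values are stand-ins, and `assert_cond` is assumed here to be a thin wrapper over `assert`, as in HotSpot's riscv sources):

```cpp
#include <cassert>
#include <cstddef>

// Assumption: mirrors HotSpot's assert_cond, a thin wrapper around assert().
#define assert_cond(cond) assert(cond)

using Register = int;
const Register c_rarg0 = 10;  // stand-in for the first argument register

struct MacroAssembler {
  void mv(Register dst, Register src) { /* emit "mv dst, src" */ }
};

// Before: the guard is immediately followed by an unconditional dereference,
// so a NULL masm would crash right here with or without the assertion.
static void pass_arg0_before(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    assert_cond(masm != NULL);  // the redundant assertion this PR deletes
    masm->mv(c_rarg0, arg);
  }
}

// After: identical behavior, one line shorter.
static void pass_arg0_after(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mv(c_rarg0, arg);
  }
}

int main() {
  MacroAssembler masm;
  pass_arg0_before(&masm, 11);
  pass_arg0_after(&masm, 11);
  return 0;
}
```

In product builds `assert` compiles away entirely, so the removed lines had no effect there either.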
8 changes: 1 addition & 7 deletions src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,6 @@

void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count, RegSet saved_regs) {
- assert_cond(masm != NULL);
bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
if (!dest_uninitialized) {
Label done;
@@ -88,7 +87,6 @@ void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register start, Register count, Register tmp, RegSet saved_regs) {
- assert_cond(masm != NULL);
__ push_reg(saved_regs, sp);
assert_different_registers(start, count, tmp);
assert_different_registers(c_rarg0, count);
@@ -109,7 +107,6 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
// directly to skip generating the check by
// InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

- assert_cond(masm != NULL);
assert(thread == xthread, "must be");

Label done;
@@ -179,7 +176,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,
Register thread,
Register tmp,
Register tmp2) {
- assert_cond(masm != NULL);
assert(thread == xthread, "must be");
assert_different_registers(store_addr, new_val, thread, tmp, tmp2,
t0);
@@ -254,7 +250,6 @@ void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm,

void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread) {
- assert_cond(masm != NULL);
bool on_oop = is_reference_type(type);
bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
@@ -278,7 +273,6 @@ void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorator

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
- assert_cond(masm != NULL);
// flatten object address if needed
if (dst.offset() == 0) {
if (dst.base() != x13) {
6 changes: 0 additions & 6 deletions src/hotspot/cpu/riscv/gc/shared/barrierSetAssembler_riscv.cpp
@@ -40,8 +40,6 @@

void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Register dst, Address src, Register tmp1, Register tmp_thread) {
- assert_cond(masm != NULL);
-
// RA is live. It must be saved around calls.

bool in_heap = (decorators & IN_HEAP) != 0;
@@ -82,7 +80,6 @@ void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators,

void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
Address dst, Register val, Register tmp1, Register tmp2) {
- assert_cond(masm != NULL);
bool in_heap = (decorators & IN_HEAP) != 0;
bool in_native = (decorators & IN_NATIVE) != 0;
switch (type) {
@@ -124,7 +121,6 @@ void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators

void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath) {
- assert_cond(masm != NULL);
// If mask changes we need to ensure that the inverse is still encodable as an immediate
STATIC_ASSERT(JNIHandles::weak_tag_mask == 1);
__ andi(obj, obj, ~JNIHandles::weak_tag_mask);
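As the comment above notes, `STATIC_ASSERT(JNIHandles::weak_tag_mask == 1)` guarantees that `~weak_tag_mask` stays encodable as a single RISC-V `andi` immediate (`~1` is `-2`, well within 12 bits), so one instruction strips the weak tag. A self-contained sketch of the tag-clearing arithmetic, with a made-up handle value:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t weak_tag_mask = 1;      // low bit tags weak jobject handles
  uint64_t obj = 0x00007f00cafe0001ULL;  // hypothetical tagged handle value
  obj &= ~weak_tag_mask;                 // one andi on RISC-V: imm = ~1 = -2
  assert(obj == 0x00007f00cafe0000ULL);  // tag cleared, address bits intact
  return 0;
}
```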
@@ -139,7 +135,6 @@ void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
Register tmp2,
Label& slow_case,
bool is_far) {
- assert_cond(masm != NULL);
assert_different_registers(obj, tmp2);
assert_different_registers(obj, var_size_in_bytes);
Register end = tmp2;
@@ -166,7 +161,6 @@ void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm,
Register var_size_in_bytes,
int con_size_in_bytes,
Register tmp1) {
- assert_cond(masm != NULL);
assert(tmp1->is_valid(), "need temp reg");

__ ld(tmp1, Address(xthread, in_bytes(JavaThread::allocated_bytes_offset())));
src/hotspot/cpu/riscv/gc/shared/cardTableBarrierSetAssembler_riscv.cpp
@@ -36,7 +36,6 @@


void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Register tmp) {
- assert_cond(masm != NULL);
assert_different_registers(obj, tmp);
BarrierSet* bs = BarrierSet::barrier_set();
assert(bs->kind() == BarrierSet::CardTableBarrierSet, "Wrong barrier set kind");
@@ -62,7 +61,6 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob

void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register start, Register count, Register tmp, RegSet saved_regs) {
- assert_cond(masm != NULL);
assert_different_registers(start, tmp);
assert_different_registers(count, tmp);

@@ -103,7 +101,6 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS
if (!precise || dst.offset() == 0) {
store_check(masm, dst.base(), x13);
} else {
- assert_cond(masm != NULL);
__ la(x13, dst);
store_check(masm, x13, t0);
}
src/hotspot/cpu/riscv/gc/shared/modRefBarrierSetAssembler_riscv.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2020, 2021, Huawei Technologies Co., Ltd. All rights reserved.
+ * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,6 @@

void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
Register src, Register dst, Register count, RegSet saved_regs) {
-
if (is_oop) {
gen_write_ref_array_pre_barrier(masm, decorators, dst, count, saved_regs);
}
6 changes: 0 additions & 6 deletions src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -62,28 +62,24 @@

static void pass_arg0(MacroAssembler* masm, Register arg) {
if (c_rarg0 != arg) {
- assert_cond(masm != NULL);
masm->mv(c_rarg0, arg);
}
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
if (c_rarg1 != arg) {
- assert_cond(masm != NULL);
masm->mv(c_rarg1, arg);
}
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
if (c_rarg2 != arg) {
- assert_cond(masm != NULL);
masm->mv(c_rarg2, arg);
}
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
if (c_rarg3 != arg) {
- assert_cond(masm != NULL);
masm->mv(c_rarg3, arg);
}
}
@@ -1720,7 +1716,6 @@ void MacroAssembler::bang_stack_size(Register size, Register tmp) {
}

SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value) {
- assert_cond(masm != NULL);
int32_t offset = 0;
_masm = masm;
_masm->la_patchable(t0, ExternalAddress((address)flag_addr), offset);
@@ -1729,7 +1724,6 @@ SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value
}

SkipIfEqual::~SkipIfEqual() {
- assert_cond(_masm != NULL);
_masm->bind(_label);
_masm = NULL;
}
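For context on the two members touched above: `SkipIfEqual` is an RAII scope whose constructor emits a flag load plus a conditional branch over the guarded region, and whose destructor binds the branch target. A mocked, self-contained sketch of the idiom (the `MockAssembler` and `Label` types here are stand-ins, not HotSpot's):

```cpp
#include <cstdio>

struct Label { bool bound = false; };

struct MockAssembler {
  // Stand-in for the load-flag-and-branch sequence the real constructor emits.
  void branch_if_equal(const bool* flag_addr, bool value, Label& l) {
    std::printf("emit: load *flag_addr, branch past region if == %d\n", (int)value);
  }
  void bind(Label& l) { l.bound = true; std::printf("emit: bind skip label\n"); }
};

class SkipIfEqual {
  MockAssembler* _masm;
  Label _label;
 public:
  SkipIfEqual(MockAssembler* masm, const bool* flag_addr, bool value)
      : _masm(masm) {
    _masm->branch_if_equal(flag_addr, value, _label);  // open the skipped region
  }
  ~SkipIfEqual() { _masm->bind(_label); }              // close it at scope exit
};

int main() {
  MockAssembler masm;
  bool flag = false;
  {
    SkipIfEqual skip(&masm, &flag, false);
    std::printf("emit: guarded code, skipped at runtime when flag == false\n");
  }  // destructor binds the label here
  return 0;
}
```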
6 changes: 0 additions & 6 deletions src/hotspot/cpu/riscv/methodHandles_riscv.cpp
@@ -48,7 +48,6 @@
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
- assert_cond(_masm != NULL);
if (VerifyMethodHandles) {
verify_klass(_masm, klass_reg, VM_CLASS_ID(java_lang_Class),
"MH argument is a Class");
@@ -70,7 +69,6 @@ static int check_nonzero(const char* xname, int x) {
void MethodHandles::verify_klass(MacroAssembler* _masm,
Register obj, vmClassID klass_id,
const char* error_message) {
- assert_cond(_masm != NULL);
InstanceKlass** klass_addr = vmClasses::klass_addr_at(klass_id);
Klass* klass = vmClasses::klass_at(klass_id);
Register temp = t1;
@@ -99,7 +97,6 @@ void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Registe

void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
bool for_compiler_entry) {
- assert_cond(_masm != NULL);
assert(method == xmethod, "interpreter calling convention");
Label L_no_such_method;
__ beqz(xmethod, L_no_such_method);
@@ -130,7 +127,6 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
Register recv, Register method_temp,
Register temp2,
bool for_compiler_entry) {
- assert_cond(_masm != NULL);
BLOCK_COMMENT("jump_to_lambda_form {");
// This is the initial entry point of a lazy method handle.
// After type checking, it picks up the invoker from the LambdaForm.
@@ -169,7 +165,6 @@ void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
// Code generation
address MethodHandles::generate_method_handle_interpreter_entry(MacroAssembler* _masm,
vmIntrinsics::ID iid) {
- assert_cond(_masm != NULL);
const bool not_for_compiler_entry = false; // this is the interpreter entry
assert(is_signature_polymorphic(iid), "expected invoke iid");
if (iid == vmIntrinsics::_invokeGeneric ||
@@ -269,7 +264,6 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
Register receiver_reg,
Register member_reg,
bool for_compiler_entry) {
- assert_cond(_masm != NULL);
assert(is_signature_polymorphic(iid), "expected invoke iid");
// temps used in this code are not used in *either* compiled or interpreted calling sequences
Register temp1 = x7;
15 changes: 1 addition & 14 deletions src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -151,7 +151,6 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
}
#endif

- assert_cond(masm != NULL && total_frame_words != NULL);
int frame_size_in_bytes = align_up(additional_frame_words * wordSize + ra_offset_in_bytes() + wordSize, 16);
// OopMap frame size is in compiler stack slots (jint's) not bytes or words
int frame_size_in_slots = frame_size_in_bytes / BytesPerInt;
@@ -206,7 +205,6 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
}

void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
- assert_cond(masm != NULL);
#ifdef COMPILER2
__ pop_CPU_state(_save_vectors, Matcher::scalable_vector_reg_size(T_BYTE));
#else
@@ -331,7 +329,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,

// Patch the callers callsite with entry to compiled code if it exists.
static void patch_callers_callsite(MacroAssembler *masm) {
- assert_cond(masm != NULL);
Label L;
__ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
__ beqz(t0, L);
@@ -769,7 +766,6 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
static void move32_64(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
- assert_cond(masm != NULL);
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
@@ -799,7 +795,6 @@ static void object_move(MacroAssembler* masm,
VMRegPair dst,
bool is_receiver,
int* receiver_offset) {
- assert_cond(masm != NULL && map != NULL && receiver_offset != NULL);
// must pass a handle. First figure out the location we use as a handle
Register rHandle = dst.first()->is_stack() ? t1 : dst.first()->as_Register();

@@ -882,7 +877,6 @@ static void object_move(MacroAssembler* masm,
static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(src.first()->is_stack() && dst.first()->is_stack() ||
src.first()->is_reg() && dst.first()->is_reg() || src.first()->is_stack() && dst.first()->is_reg(), "Unexpected error");
- assert_cond(masm != NULL);
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
__ lwu(t0, Address(fp, reg2offset_in(src.first())));
@@ -903,7 +897,6 @@

// A long move
static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
- assert_cond(masm != NULL);
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
// stack to stack
@@ -927,7 +920,6 @@
static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
assert(src.first()->is_stack() && dst.first()->is_stack() ||
src.first()->is_reg() && dst.first()->is_reg() || src.first()->is_stack() && dst.first()->is_reg(), "Unexpected error");
- assert_cond(masm != NULL);
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
__ ld(t0, Address(fp, reg2offset_in(src.first())));
@@ -947,7 +939,6 @@ static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}

void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
- assert_cond(masm != NULL);
// We always ignore the frame_slots arg and just use the space just below frame pointer
// which by this time is free to use
switch (ret_type) {
@@ -965,7 +956,6 @@ void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type,
}

void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_type, int frame_slots) {
- assert_cond(masm != NULL);
// We always ignore the frame_slots arg and just use the space just below frame pointer
// which by this time is free to use
switch (ret_type) {
@@ -983,7 +973,6 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
}

static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
- assert_cond(masm != NULL && args != NULL);
RegSet x;
for ( int i = first_arg ; i < arg_count ; i++ ) {
if (args[i].first()->is_Register()) {
@@ -997,7 +986,6 @@ static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegP
}

static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
- assert_cond(masm != NULL && args != NULL);
RegSet x;
for ( int i = first_arg ; i < arg_count ; i++ ) {
if (args[i].first()->is_Register()) {
@@ -1018,7 +1006,6 @@ static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMR
}

static void rt_call(MacroAssembler* masm, address dest) {
- assert_cond(masm != NULL);
CodeBlob *cb = CodeCache::find_blob(dest);
if (cb) {
__ far_call(RuntimeAddress(dest));
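For context on `rt_call` above: it emits a pc-relative `far_call` when `CodeCache::find_blob` reports the destination lives in the code cache, and otherwise falls back to materializing the full address before an indirect call. A toy sketch of that dispatch decision (the address ranges and printed sequences are invented for illustration, not HotSpot's actual encodings):

```cpp
#include <cstdint>
#include <cstdio>

// Toy model: a pc-relative auipc/jalr pair reaches +/-2 GiB, so a destination
// inside a (hypothetical) code-cache range can use the short call sequence,
// while anything else needs a full 64-bit address load first.
int main() {
  const uint64_t cache_lo = 0x100000000ULL, cache_hi = 0x140000000ULL;
  uint64_t dest = 0x120000000ULL;  // hypothetical runtime-stub address
  if (dest >= cache_lo && dest < cache_hi) {
    std::printf("emit: auipc/jalr (pc-relative far_call)\n");
  } else {
    std::printf("emit: load 64-bit dest into t0, then jalr t0\n");
  }
  return 0;
}
```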
@@ -2536,7 +2523,7 @@ SafepointBlob* SharedRuntime::generate_handler_blob(address call_ptr, int poll_t
// must do any gc of the args.
//
RuntimeStub* SharedRuntime::generate_resolve_blob(address destination, const char* name) {
- assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
+ assert(StubRoutines::forward_exception_entry() != NULL, "must be generated before");

// allocate space for the code
ResourceMark rm;
12 changes: 3 additions & 9 deletions src/hotspot/cpu/riscv/templateTable_riscv.cpp
@@ -70,15 +70,12 @@ static inline Address aaddress(int n) {
return iaddress(n);
}

- static inline Address iaddress(Register r, Register temp, InterpreterMacroAssembler* _masm) {
- assert_cond(_masm != NULL);
+ static inline Address iaddress(Register r, Register temp, InterpreterMacroAssembler* _masm) {
_masm->shadd(temp, r, xlocals, temp, 3);
return Address(temp, 0);
}

- static inline Address laddress(Register r, Register temp,
- InterpreterMacroAssembler* _masm) {
- assert_cond(_masm != NULL);
+ static inline Address laddress(Register r, Register temp, InterpreterMacroAssembler* _masm) {
_masm->shadd(temp, r, xlocals, temp, 3);
return Address(temp, Interpreter::local_offset_in_bytes(1));;
}
@@ -87,8 +84,7 @@ static inline Address faddress(Register r, Register temp, InterpreterMacroAssemb
return iaddress(r, temp, _masm);
}

- static inline Address daddress(Register r, Register temp,
- InterpreterMacroAssembler* _masm) {
+ static inline Address daddress(Register r, Register temp, InterpreterMacroAssembler* _masm) {
return laddress(r, temp, _masm);
}

@@ -134,15 +130,13 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
Register val,
DecoratorSet decorators) {
assert(val == noreg || val == x10, "parameter is just for looks");
- assert_cond(_masm != NULL);
__ store_heap_oop(dst, val, x29, x11, decorators);
}

static void do_oop_load(InterpreterMacroAssembler* _masm,
Address src,
Register dst,
DecoratorSet decorators) {
- assert_cond(_masm != NULL);
__ load_heap_oop(dst, src, x7, x11, decorators);
}
