8320397: RISC-V: Avoid passing t0 as temp register to MacroAssembler::cmpxchg_obj_header/cmpxchgptr
Backport-of: 0be0775a76
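For context, a hedged reading of the change: on RISC-V, t0 (and t1) act as scratch registers that MacroAssembler pseudo-instructions may clobber internally, so handing t0 in as the caller-supplied temp for cmpxchg_obj_header/cmpxchgptr risks the two uses stepping on each other. The hunks below (which appear to touch the ZGC barrier instruction definitions in the x/z .ad files plus the RISC-V interpreter, macro-assembler and shared-runtime sources) therefore thread a dedicated temp operand or register through instead, and cmpxchgptr now asserts that its temp differs from t0. The toy program below is only an analogy for that aliasing hazard, not JDK code; every name in it is invented.

    #include <cstdio>

    // Toy model of the hazard this patch removes: 'kScratch' stands for the
    // scratch register t0 that assembler helpers feel free to clobber
    // internally. If a caller also hands the same slot in as its "temp",
    // the helper silently destroys the caller's value.
    int regs[8];                 // pretend register file
    const int kScratch = 0;      // "t0": reserved for internal scratch use

    void helper(int temp) {      // callee that, like cmpxchg_obj_header, takes a temp
      regs[kScratch] = 42;       // ...but also uses the scratch slot internally
      regs[temp] += 1;           // caller-visible work done through 'temp'
    }

    int main() {
      regs[kScratch] = 7;        // caller parks a live value in the scratch slot
      helper(kScratch);          // passing "t0" as the temp aliases the two uses
      // prints 43, not the 8 the caller might expect: the internal clobber won
      std::printf("%d\n", regs[kScratch]);
      return 0;
    }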
@@ -52,11 +52,11 @@ static void x_load_barrier_slow_path(MacroAssembler& _masm, const MachNode* node
 %}

 // Load Pointer
-instruct xLoadP(iRegPNoSp dst, memory mem)
+instruct xLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp)
 %{
   match(Set dst (LoadP mem));
   predicate(UseZGC && !ZGenerational && (n->as_Load()->barrier_data() != 0));
-  effect(TEMP dst);
+  effect(TEMP dst, TEMP tmp);

   ins_cost(4 * DEFAULT_COST);
 
@@ -65,17 +65,17 @@ instruct xLoadP(iRegPNoSp dst, memory mem)
   ins_encode %{
     const Address ref_addr (as_Register($mem$$base), $mem$$disp);
     __ ld($dst$$Register, ref_addr);
-    x_load_barrier(_masm, this, ref_addr, $dst$$Register, t0 /* tmp */, barrier_data());
+    x_load_barrier(_masm, this, ref_addr, $dst$$Register, $tmp$$Register /* tmp */, barrier_data());
   %}

   ins_pipe(iload_reg_mem);
 %}

-instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
   predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
-  effect(KILL cr, TEMP_DEF res);
+  effect(TEMP_DEF res, TEMP tmp);

   ins_cost(2 * VOLATILE_REF_COST);
 
@@ -86,17 +86,15 @@ instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
     Label failed;
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
-               Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
-               true /* result_as_bool */);
-    __ beqz($res$$Register, failed);
-    __ mv(t0, $oldval$$Register);
-    __ bind(failed);
+               Assembler::relaxed /* acquire */, Assembler::rl /* release */, $tmp$$Register);
+    __ sub(t0, $tmp$$Register, $oldval$$Register);
+    __ seqz($res$$Register, t0);
     if (barrier_data() != XLoadBarrierElided) {
       Label good;
-      __ ld(t1, Address(xthread, XThreadLocalData::address_bad_mask_offset()), t1 /* tmp */);
-      __ andr(t1, t1, t0);
-      __ beqz(t1, good);
-      x_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */);
+      __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
+      __ andr(t0, t0, $tmp$$Register);
+      __ beqz(t0, good);
+      x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
       __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
                  Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
                  true /* result_as_bool */);
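A note on the rewritten xCompareAndSwapP encoding above, sketched below with placeholder names (addr, oldval, newval, tmp and res stand in for the operand registers; this is a reading of the hunk, not code lifted from the patch): the CAS now deposits the fetched value in the new tmp operand and derives the boolean result arithmetically, so the loaded oop stays available for the ZGC bad-mask test without being parked in t0.

    __ cmpxchg(addr, oldval, newval, Assembler::int64,
               Assembler::relaxed /* acquire */, Assembler::rl /* release */, tmp); // tmp <- value found at addr
    __ sub(t0, tmp, oldval);   // zero iff the fetched value matched oldval
    __ seqz(res, t0);          // res = 1 on success, 0 on failure
    // tmp still holds the fetched oop, so the barrier's bad-mask check and
    // slow path can consume it directly.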
@@ -107,11 +105,11 @@ instruct xCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
   ins_pipe(pipe_slow);
 %}

-instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
+instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
   predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() == XLoadBarrierStrong));
-  effect(KILL cr, TEMP_DEF res);
+  effect(TEMP_DEF res, TEMP tmp);

   ins_cost(2 * VOLATILE_REF_COST);
 
@@ -122,17 +120,15 @@ instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
     Label failed;
     guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
-               Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
-               true /* result_as_bool */);
-    __ beqz($res$$Register, failed);
-    __ mv(t0, $oldval$$Register);
-    __ bind(failed);
+               Assembler::aq /* acquire */, Assembler::rl /* release */, $tmp$$Register);
+    __ sub(t0, $tmp$$Register, $oldval$$Register);
+    __ seqz($res$$Register, t0);
     if (barrier_data() != XLoadBarrierElided) {
       Label good;
-      __ ld(t1, Address(xthread, XThreadLocalData::address_bad_mask_offset()), t1 /* tmp */);
-      __ andr(t1, t1, t0);
-      __ beqz(t1, good);
-      x_load_barrier_slow_path(_masm, this, Address($mem$$Register), t0 /* ref */, t1 /* tmp */);
+      __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
+      __ andr(t0, t0, $tmp$$Register);
+      __ beqz(t0, good);
+      x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $tmp$$Register /* ref */, $res$$Register /* tmp */);
       __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
                  Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
                  true /* result_as_bool */);
@@ -143,10 +139,10 @@ instruct xCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
   ins_pipe(pipe_slow);
 %}

-instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{
+instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
   predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
-  effect(TEMP_DEF res);
+  effect(TEMP_DEF res, TEMP tmp);

   ins_cost(2 * VOLATILE_REF_COST);
 
@@ -161,7 +157,7 @@ instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
       __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
       __ andr(t0, t0, $res$$Register);
       __ beqz(t0, good);
-      x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */);
+      x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
       __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
                  Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
       __ bind(good);
@@ -171,10 +167,10 @@ instruct xCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
   ins_pipe(pipe_slow);
 %}

-instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) %{
+instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp) %{
   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
   predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() == XLoadBarrierStrong);
-  effect(TEMP_DEF res);
+  effect(TEMP_DEF res, TEMP tmp);

   ins_cost(2 * VOLATILE_REF_COST);
 
@@ -189,7 +185,7 @@ instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
       __ ld(t0, Address(xthread, XThreadLocalData::address_bad_mask_offset()));
       __ andr(t0, t0, $res$$Register);
       __ beqz(t0, good);
-      x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, t0 /* tmp */);
+      x_load_barrier_slow_path(_masm, this, Address($mem$$Register), $res$$Register /* ref */, $tmp$$Register /* tmp */);
       __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, Assembler::int64,
                  Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
       __ bind(good);
@@ -199,10 +195,10 @@ instruct xCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
   ins_pipe(pipe_slow);
 %}

-instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp) %{
   match(Set prev (GetAndSetP mem newv));
   predicate(UseZGC && !ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
-  effect(TEMP_DEF prev, KILL cr);
+  effect(TEMP_DEF prev, TEMP tmp);

   ins_cost(2 * VOLATILE_REF_COST);
 
@@ -210,16 +206,16 @@ instruct xGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
 
   ins_encode %{
     __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
-    x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data());
+    x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
   %}

   ins_pipe(pipe_serial);
 %}

-instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp) %{
   match(Set prev (GetAndSetP mem newv));
   predicate(UseZGC && !ZGenerational && needs_acquiring_load_reserved(n) && (n->as_LoadStore()->barrier_data() != 0));
-  effect(TEMP_DEF prev, KILL cr);
+  effect(TEMP_DEF prev, TEMP tmp);

   ins_cost(VOLATILE_REF_COST);
 
@@ -227,7 +223,7 @@ instruct xGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr)
 
   ins_encode %{
     __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
-    x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, t0 /* tmp */, barrier_data());
+    x_load_barrier(_masm, this, Address(noreg, 0), $prev$$Register, $tmp$$Register /* tmp */, barrier_data());
   %}
   ins_pipe(pipe_serial);
 %}
 
@@ -79,7 +79,7 @@ static void z_load_barrier(MacroAssembler& _masm, const MachNode* node, Address
 
 static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, Register tmp, bool is_atomic) {
   if (node->barrier_data() == ZBarrierElided) {
-    z_color(_masm, node, rnew_zpointer, rnew_zaddress, t0);
+    z_color(_masm, node, rnew_zpointer, rnew_zaddress, tmp);
   } else {
     bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
     ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic);
@@ -90,11 +90,11 @@ static void z_store_barrier(MacroAssembler& _masm, const MachNode* node, Address
 %}

 // Load Pointer
-instruct zLoadP(iRegPNoSp dst, memory mem)
+instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp)
 %{
   match(Set dst (LoadP mem));
   predicate(UseZGC && ZGenerational && n->as_Load()->barrier_data() != 0);
-  effect(TEMP dst);
+  effect(TEMP dst, TEMP tmp);

   ins_cost(4 * DEFAULT_COST);
 
@@ -103,34 +103,35 @@ instruct zLoadP(iRegPNoSp dst, memory mem)
   ins_encode %{
     const Address ref_addr(as_Register($mem$$base), $mem$$disp);
     __ ld($dst$$Register, ref_addr);
-    z_load_barrier(_masm, this, ref_addr, $dst$$Register, t0);
+    z_load_barrier(_masm, this, ref_addr, $dst$$Register, $tmp$$Register);
   %}

   ins_pipe(iload_reg_mem);
 %}

 // Store Pointer
-instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp, rFlagsReg cr)
+instruct zStoreP(memory mem, iRegP src, iRegPNoSp tmp1, iRegPNoSp tmp2)
 %{
   predicate(UseZGC && ZGenerational && n->as_Store()->barrier_data() != 0);
   match(Set mem (StoreP mem src));
-  effect(TEMP tmp, KILL cr);
+  effect(TEMP tmp1, TEMP tmp2);

   ins_cost(125); // XXX
   format %{ "sd $mem, $src\t# ptr" %}
   ins_encode %{
     const Address ref_addr(as_Register($mem$$base), $mem$$disp);
-    z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp$$Register, t1, false /* is_atomic */);
-    __ sd($tmp$$Register, ref_addr);
+    z_store_barrier(_masm, this, ref_addr, $src$$Register, $tmp1$$Register, $tmp2$$Register, false /* is_atomic */);
+    __ sd($tmp1$$Register, ref_addr);
   %}
   ins_pipe(pipe_serial);
 %}

-instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
+instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval,
+                          iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1) %{
   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
   predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
-  effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res);
+  effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res);

   ins_cost(2 * VOLATILE_REF_COST);
 
@@ -140,19 +141,20 @@ instruct zCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newva
   ins_encode %{
     guarantee($mem$$disp == 0, "impossible encoding");
     Address ref_addr($mem$$Register);
-    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0);
-    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */);
+    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
+    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
     __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */);
   %}

   ins_pipe(pipe_slow);
 %}

-instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
+instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval,
+                             iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1) %{
   match(Set res (CompareAndSwapP mem (Binary oldval newval)));
   match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
   predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
-  effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res);
+  effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res);

   ins_cost(2 * VOLATILE_REF_COST);
 
@@ -162,18 +164,19 @@ instruct zCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
   ins_encode %{
     guarantee($mem$$disp == 0, "impossible encoding");
     Address ref_addr($mem$$Register);
-    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0);
-    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */);
+    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
+    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
     __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register, true /* result_as_bool */);
   %}

   ins_pipe(pipe_slow);
 %}

-instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
+instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval,
+                              iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1) %{
   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
   predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
-  effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res);
+  effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res);

   ins_cost(2 * VOLATILE_REF_COST);
 
@@ -182,8 +185,8 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
   ins_encode %{
     guarantee($mem$$disp == 0, "impossible encoding");
     Address ref_addr($mem$$Register);
-    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0);
-    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */);
+    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
+    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
     __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register);
     z_uncolor(_masm, this, $res$$Register);
   %}
@@ -191,10 +194,11 @@ instruct zCompareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP n
   ins_pipe(pipe_slow);
 %}

-instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, rFlagsReg cr) %{
+instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval,
+                                 iRegPNoSp oldval_tmp, iRegPNoSp newval_tmp, iRegPNoSp tmp1) %{
   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
   predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
-  effect(TEMP oldval_tmp, TEMP newval_tmp, KILL cr, TEMP_DEF res);
+  effect(TEMP oldval_tmp, TEMP newval_tmp, TEMP tmp1, TEMP_DEF res);

   ins_cost(2 * VOLATILE_REF_COST);
 
@@ -203,8 +207,8 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
   ins_encode %{
     guarantee($mem$$disp == 0, "impossible encoding");
     Address ref_addr($mem$$Register);
-    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, t0);
-    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, t1, true /* is_atomic */);
+    z_color(_masm, this, $oldval_tmp$$Register, $oldval$$Register, $tmp1$$Register);
+    z_store_barrier(_masm, this, ref_addr, $newval$$Register, $newval_tmp$$Register, $tmp1$$Register, true /* is_atomic */);
     __ cmpxchg($mem$$Register, $oldval_tmp$$Register, $newval_tmp$$Register, Assembler::int64, Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register);
     z_uncolor(_masm, this, $res$$Register);
   %}
@@ -212,17 +216,17 @@ instruct zCompareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iReg
   ins_pipe(pipe_slow);
 %}

-instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp) %{
   match(Set prev (GetAndSetP mem newv));
   predicate(UseZGC && ZGenerational && !needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
-  effect(TEMP_DEF prev, KILL cr);
+  effect(TEMP_DEF prev, TEMP tmp);

   ins_cost(2 * VOLATILE_REF_COST);

   format %{ "atomic_xchg $prev, $newv, [$mem], #@zGetAndSetP" %}

   ins_encode %{
-    z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, t1, true /* is_atomic */);
+    z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
     __ atomic_xchg($prev$$Register, $prev$$Register, $mem$$Register);
     z_uncolor(_masm, this, $prev$$Register);
   %}
@@ -230,17 +234,17 @@ instruct zGetAndSetP(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
   ins_pipe(pipe_serial);
 %}

-instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, rFlagsReg cr) %{
+instruct zGetAndSetPAcq(indirect mem, iRegP newv, iRegPNoSp prev, iRegPNoSp tmp) %{
   match(Set prev (GetAndSetP mem newv));
   predicate(UseZGC && ZGenerational && needs_acquiring_load_reserved(n) && n->as_LoadStore()->barrier_data() != 0);
-  effect(TEMP_DEF prev, KILL cr);
+  effect(TEMP_DEF prev, TEMP tmp);

   ins_cost(2 * VOLATILE_REF_COST);

   format %{ "atomic_xchg_acq $prev, $newv, [$mem], #@zGetAndSetPAcq" %}

   ins_encode %{
-    z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, t1, true /* is_atomic */);
+    z_store_barrier(_masm, this, Address($mem$$Register), $newv$$Register, $prev$$Register, $tmp$$Register, true /* is_atomic */);
     __ atomic_xchgal($prev$$Register, $prev$$Register, $mem$$Register);
     z_uncolor(_masm, this, $prev$$Register);
   %}
 
@@ -849,7 +849,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
   assert(lock_offset == 0,
          "displached header must be first word in BasicObjectLock");

-  cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, t0, count, /*fallthrough*/nullptr);
+  cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, tmp, count, /*fallthrough*/nullptr);

   // Test if the oopMark is an obvious stack pointer, i.e.,
   //  1) (mark & 7) == 0, and
@@ -963,7 +963,7 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
     beqz(header_reg, count);

     // Atomic swap back the old header
-    cmpxchg_obj_header(swap_reg, header_reg, obj_reg, t0, count, /*fallthrough*/nullptr);
+    cmpxchg_obj_header(swap_reg, header_reg, obj_reg, tmp_reg, count, /*fallthrough*/nullptr);
   }

   // Call the runtime routine for slow case.
 
@@ -2520,9 +2520,9 @@ void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acqui
 
 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp,
                                 Label &succeed, Label *fail) {
-  assert_different_registers(addr, tmp);
-  assert_different_registers(newv, tmp);
-  assert_different_registers(oldv, tmp);
+  assert_different_registers(addr, tmp, t0);
+  assert_different_registers(newv, tmp, t0);
+  assert_different_registers(oldv, tmp, t0);

   // oldv holds comparison value
   // newv holds value to write in exchange
 
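Caller-side sketch of the strengthened contract above (hedged: obj_addr, expected, new_val and scratch are placeholder register names, not registers taken from the patch): with the extra t0 term in assert_different_registers, any temp handed to cmpxchgptr now has to be a register other than t0 (and other than the operand registers), since the macro assembler keeps t0 for its own scratch use.

    Label succeed;
    // scratch must not alias t0 or the operands, or the new asserts fire
    __ cmpxchgptr(expected, new_val, obj_addr, scratch, succeed, nullptr);
    __ bind(succeed);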
@@ -1686,7 +1686,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ sd(swap_reg, Address(lock_reg, mark_word_offset));

     // src -> dest if dest == x10 else x10 <- dest
-    __ cmpxchg_obj_header(x10, lock_reg, obj_reg, t0, count, /*fallthrough*/nullptr);
+    __ cmpxchg_obj_header(x10, lock_reg, obj_reg, lock_tmp, count, /*fallthrough*/nullptr);

     // Test if the oopMark is an obvious stack pointer, i.e.,
     //  1) (mark & 3) == 0, and
@@ -1826,7 +1826,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
       // Atomic swap old header if oop still contains the stack lock
       Label count;
-      __ cmpxchg_obj_header(x10, old_hdr, obj_reg, t0, count, &slow_path_unlock);
+      __ cmpxchg_obj_header(x10, old_hdr, obj_reg, lock_tmp, count, &slow_path_unlock);
       __ bind(count);
       __ decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
     } else {
 