8329088: Stack chunk thawing races with concurrent GC stack iteration
Reviewed-by: stefank, pchilanomate
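Editor's note on the shape of the change, as read from the hunks below: the StackChunk's bottom offset becomes a stored Java field (jdk_internal_vm_StackChunk::bottom) and argsize is now derived from it, where previously argsize was stored and bottom was derived; in addition, the sp field is read and written through new relaxed-atomic int accessors. The net effect is that a GC thread iterating the chunk's frames concurrently with thawing only races on a single word-sized field. The sketch below is a simplified model for orientation only, not HotSpot code: it stands in std::atomic for HotSpot's Atomic::load/Atomic::store and uses a placeholder value for frame::metadata_words_at_top.

#include <atomic>

// Simplified model of the stack chunk fields after this change (illustration only).
struct StackChunkModel {
  static constexpr int metadata_words_at_top = 2; // placeholder, not the real constant

  int stack_size;       // fixed at allocation, in words
  int bottom;           // stored field; written only while the chunk is empty
  std::atomic<int> sp;  // moves during freeze/thaw; may be read by concurrent GC iteration

  // argsize is no longer stored; it is derived from the stored bottom.
  int argsize() const { return stack_size - bottom - metadata_words_at_top; }

  // New emptiness test: one relaxed read of sp against a stable bound, instead of
  // the old comparison against stack_size - argsize - metadata, which mixed two
  // independently mutated fields.
  bool is_empty() const { return sp.load(std::memory_order_relaxed) == bottom; }

  // Thawing frames out of the chunk only moves sp towards bottom.
  void on_frames_thawed(int words) { sp.fetch_add(words, std::memory_order_relaxed); }
};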
@@ -432,7 +432,7 @@ class SerializeClosure;
   template(yieldInfo_name, "yieldInfo") \
   template(tail_name, "tail") \
   template(size_name, "size") \
-  template(argsize_name, "argsize") \
+  template(bottom_name, "bottom") \
   template(mode_name, "mode") \
   template(numFrames_name, "numFrames") \
   template(numOops_name, "numOops") \

@@ -250,8 +250,8 @@ void InstanceStackChunkKlass::print_chunk(const stackChunkOop c, bool verbose, o
   st->print_cr(" barriers: %d gc_mode: %d bitmap: %d parent: " PTR_FORMAT,
                c->requires_barriers(), c->is_gc_mode(), c->has_bitmap(), p2i(c->parent()));
   st->print_cr(" flags mixed: %d", c->has_mixed_frames());
-  st->print_cr(" size: %d argsize: %d max_size: %d sp: %d pc: " PTR_FORMAT,
-               c->stack_size(), c->argsize(), c->max_thawing_size(), c->sp(), p2i(c->pc()));
+  st->print_cr(" size: %d bottom: %d max_size: %d sp: %d pc: " PTR_FORMAT,
+               c->stack_size(), c->bottom(), c->max_thawing_size(), c->sp(), p2i(c->pc()));

   if (verbose) {
     st->cr();
@@ -206,6 +206,8 @@ class oopDesc {
   jboolean bool_field_acquire(int offset) const;
   void release_bool_field_put(int offset, jboolean contents);

+  jint int_field_relaxed(int offset) const;
+  void int_field_put_relaxed(int offset, jint contents);
   jint int_field_acquire(int offset) const;
   void release_int_field_put(int offset, jint contents);

@@ -238,6 +238,8 @@ inline void oopDesc::short_field_put(int offset, jshort value) { *field_add

 inline jint oopDesc::int_field(int offset) const { return *field_addr<jint>(offset); }
 inline void oopDesc::int_field_put(int offset, jint value) { *field_addr<jint>(offset) = value; }
+inline jint oopDesc::int_field_relaxed(int offset) const { return Atomic::load(field_addr<jint>(offset)); }
+inline void oopDesc::int_field_put_relaxed(int offset, jint value) { Atomic::store(field_addr<jint>(offset), value); }

 inline jlong oopDesc::long_field(int offset) const { return *field_addr<jlong>(offset); }
 inline void oopDesc::long_field_put(int offset, jlong value) { *field_addr<jlong>(offset) = value; }
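The new int_field_relaxed/int_field_put_relaxed accessors go through Atomic::load and Atomic::store, which give word-sized, untorn accesses with no ordering constraints. Outside the VM, the closest standard-C++ analogue is a relaxed std::atomic access; the snippet below is only that analogy, not the HotSpot implementation.

#include <atomic>

// Analogy for int_field_relaxed / int_field_put_relaxed (not HotSpot code).
// A plain int written by one thread while another thread reads it is a data race;
// a relaxed atomic access is well defined and yields either the old or the new
// value, never a torn mixture, while adding no ordering or fences.
struct RelaxedIntField {
  std::atomic<int> value{0};

  int  get() const { return value.load(std::memory_order_relaxed); }
  void put(int v)  { value.store(v, std::memory_order_relaxed); }
};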
@@ -533,12 +533,12 @@ bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames,
   assert(oopDesc::is_oop(this), "");

   assert(stack_size() >= 0, "");
-  assert(argsize() >= 0, "");
   assert(!has_bitmap() || is_gc_mode(), "");

   if (is_empty()) {
-    assert(argsize() == 0, "");
     assert(max_thawing_size() == 0, "");
+  } else {
+    assert(argsize() >= 0, "");
   }

   assert(oopDesc::is_oop_or_null(parent()), "");

@@ -547,7 +547,7 @@ bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames,

   // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
   // for the top frame (below sp), and *not* for the bottom frame.
-  int size = stack_size() - argsize() - sp();
+  int size = bottom() - sp();
   assert(size >= 0, "");
   assert((size == 0) == is_empty(), "");

@@ -570,12 +570,15 @@ bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames,
   assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");

   if (!concurrent) {
-    assert(closure._size <= size + argsize() + frame::metadata_words,
-           "size: %d argsize: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
-           size, argsize(), closure._size, closure._sp - start_address(), sp(), stack_size());
-    assert(argsize() == closure._argsize - (closure._num_frames > 0 ? frame::metadata_words_at_top : 0),
-           "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
-           argsize(), closure._argsize, closure._callee_interpreted);
+    assert(closure._size <= size + (stack_size() - bottom()),
+           "size: %d bottom: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
+           size, bottom(), closure._size, closure._sp - start_address(), sp(), stack_size());
+    if (closure._num_frames > 0) {
+      assert(closure._argsize >= frame::metadata_words_at_top, "should be set up");
+      assert(argsize() == closure._argsize - frame::metadata_words_at_top,
+             "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
+             argsize(), closure._argsize, closure._callee_interpreted);
+    }

     int calculated_max_size = closure._size
                             + closure._num_i2c * frame::align_wiggle
@@ -101,6 +101,7 @@ public:
   inline void set_cont_access(oop value);

   inline int bottom() const;
+  inline void set_bottom(int value);

   inline HeapWord* start_of_stack() const;

@@ -60,15 +60,15 @@ inline void stackChunkOopDesc::set_parent_access(oop value) { jdk_internal_vm

 inline int stackChunkOopDesc::stack_size() const { return jdk_internal_vm_StackChunk::size(as_oop()); }

+inline int stackChunkOopDesc::bottom() const { return jdk_internal_vm_StackChunk::bottom(as_oop()); }
+inline void stackChunkOopDesc::set_bottom(int value) { jdk_internal_vm_StackChunk::set_bottom(this, value); }
+
 inline int stackChunkOopDesc::sp() const { return jdk_internal_vm_StackChunk::sp(as_oop()); }
 inline void stackChunkOopDesc::set_sp(int value) { jdk_internal_vm_StackChunk::set_sp(this, value); }

 inline address stackChunkOopDesc::pc() const { return jdk_internal_vm_StackChunk::pc(as_oop()); }
 inline void stackChunkOopDesc::set_pc(address value) { jdk_internal_vm_StackChunk::set_pc(this, value); }

-inline int stackChunkOopDesc::argsize() const { return jdk_internal_vm_StackChunk::argsize(as_oop()); }
-inline void stackChunkOopDesc::set_argsize(int value) { jdk_internal_vm_StackChunk::set_argsize(as_oop(), value); }
-
 inline uint8_t stackChunkOopDesc::flags() const { return jdk_internal_vm_StackChunk::flags(as_oop()); }
 inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }

@@ -108,7 +108,10 @@ inline void stackChunkOopDesc::set_cont_raw(oop value) { jdk_internal_vm_Stac
 template<DecoratorSet decorators>
 inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }

-inline int stackChunkOopDesc::bottom() const { return stack_size() - argsize() - frame::metadata_words_at_top; }
+inline int stackChunkOopDesc::argsize() const {
+  assert(!is_empty(), "should not ask for argsize in empty chunk");
+  return stack_size() - bottom() - frame::metadata_words_at_top;
+}

 inline HeapWord* stackChunkOopDesc::start_of_stack() const {
   return (HeapWord*)(cast_from_oop<intptr_t>(as_oop()) + InstanceStackChunkKlass::offset_of_stack());

@@ -132,10 +135,8 @@ inline intptr_t* stackChunkOopDesc::from_offset(int offset) const {
 }

 inline bool stackChunkOopDesc::is_empty() const {
-  assert(sp() <= stack_size(), "");
-  assert((sp() == stack_size()) == (sp() >= stack_size() - argsize() - frame::metadata_words_at_top),
-         "sp: %d size: %d argsize: %d", sp(), stack_size(), argsize());
-  return sp() == stack_size();
+  assert(sp() <= bottom(), "");
+  return sp() == bottom();
 }

 inline bool stackChunkOopDesc::is_in_chunk(void* p) const {
@@ -403,7 +403,7 @@ protected:
   inline void patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp);

   // slow path
-  virtual stackChunkOop allocate_chunk_slow(size_t stack_size) = 0;
+  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) = 0;

   int cont_size() { return pointer_delta_as_int(_cont_stack_bottom, _cont_stack_top); }

@@ -438,20 +438,12 @@ private:
 protected:
   void freeze_fast_copy(stackChunkOop chunk, int chunk_start_sp CONT_JFR_ONLY(COMMA bool chunk_is_allocated));
   bool freeze_fast_new_chunk(stackChunkOop chunk);
-
-#ifdef ASSERT
-  bool is_empty(stackChunkOop chunk) {
-    // during freeze, the chunk is in an intermediate state (after setting the chunk's argsize but before setting its
-    // ultimate sp) so we use this instead of stackChunkOopDesc::is_empty
-    return chunk->sp() >= chunk->stack_size() - chunk->argsize() - frame::metadata_words_at_top;
-  }
-#endif
 };

 template <typename ConfigT>
 class Freeze : public FreezeBase {
 private:
-  stackChunkOop allocate_chunk(size_t stack_size);
+  stackChunkOop allocate_chunk(size_t stack_size, int argsize_md);

 public:
   inline Freeze(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp)

@@ -460,7 +452,7 @@ public:
   freeze_result try_freeze_fast();

 protected:
-  virtual stackChunkOop allocate_chunk_slow(size_t stack_size) override { return allocate_chunk(stack_size); }
+  virtual stackChunkOop allocate_chunk_slow(size_t stack_size, int argsize_md) override { return allocate_chunk(stack_size, argsize_md); }
 };

 FreezeBase::FreezeBase(JavaThread* thread, ContinuationWrapper& cont, intptr_t* frame_sp) :

@@ -543,7 +535,7 @@ freeze_result Freeze<ConfigT>::try_freeze_fast() {
   DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
   assert(_fast_freeze_size == 0, "");

-  stackChunkOop chunk = allocate_chunk(cont_size() + frame::metadata_words);
+  stackChunkOop chunk = allocate_chunk(cont_size() + frame::metadata_words, _cont.argsize() + frame::metadata_words_at_top);
   if (freeze_fast_new_chunk(chunk)) {
     return freeze_ok;
   }
@@ -572,7 +564,7 @@ int FreezeBase::size_if_fast_freeze_available() {
   // so we subtract it only if we overlap with the caller, i.e. the current chunk isn't empty.
   // Consider leaving the chunk's argsize set when emptying it and removing the following branch,
   // although that would require changing stackChunkOopDesc::is_empty
-  if (chunk_sp < chunk->stack_size()) {
+  if (!chunk->is_empty()) {
     total_size_needed -= _cont.argsize() + frame::metadata_words_at_top;
   }

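To make the comment above concrete, here is a small worked example of the subtraction; the numbers are invented and the starting total is only assumed to include the top metadata words, so treat it as an illustration of the overlap idea rather than the exact formula.

#include <cassert>

int main() {
  // Invented word counts, only to exercise the overlap subtraction above.
  const int metadata_words_at_top = 2;  // stand-in for frame::metadata_words_at_top
  const int cont_size             = 50; // frames being frozen, in words
  const int cont_argsize          = 4;  // stack-passed args of the bottom-most frame

  int total_size_needed = cont_size + metadata_words_at_top;

  const bool chunk_is_empty = false;    // freezing into a chunk that already has frames
  if (!chunk_is_empty) {
    // The bottom frame's outgoing args (plus the top metadata slots) overlap the
    // chunk's existing top frame, so they need no additional space.
    total_size_needed -= cont_argsize + metadata_words_at_top;
  }
  assert(total_size_needed == 46);
  return 0;
}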
@@ -585,14 +577,13 @@ int FreezeBase::size_if_fast_freeze_available() {

 void FreezeBase::freeze_fast_existing_chunk() {
   stackChunkOop chunk = _cont.tail();
-  DEBUG_ONLY(_orig_chunk_sp = chunk->sp_address();)

   DEBUG_ONLY(_fast_freeze_size = size_if_fast_freeze_available();)
   assert(_fast_freeze_size > 0, "");

-  if (chunk->sp() < chunk->stack_size()) { // we are copying into a non-empty chunk
+  if (!chunk->is_empty()) { // we are copying into a non-empty chunk
     DEBUG_ONLY(_empty = false;)
-    assert(chunk->sp() < (chunk->stack_size() - chunk->argsize()), "");
+    DEBUG_ONLY(_orig_chunk_sp = chunk->sp_address();)
 #ifdef ASSERT
     {
       intptr_t* retaddr_slot = (chunk->sp_address()

@@ -630,13 +621,14 @@ void FreezeBase::freeze_fast_existing_chunk() {

     freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
   } else { // the chunk is empty
-    DEBUG_ONLY(_empty = true;)
-    const int chunk_start_sp = chunk->sp();
+    const int chunk_start_sp = chunk->stack_size();

-    assert(chunk_start_sp == chunk->stack_size(), "");
+    DEBUG_ONLY(_empty = true;)
+    DEBUG_ONLY(_orig_chunk_sp = chunk->start_address() + chunk_start_sp;)

     chunk->set_max_thawing_size(cont_size());
-    chunk->set_argsize(_cont.argsize());
+    chunk->set_bottom(chunk_start_sp - _cont.argsize() - frame::metadata_words_at_top);
+    chunk->set_sp(chunk->bottom());

     freeze_fast_copy(chunk, chunk_start_sp CONT_JFR_ONLY(COMMA false));
   }

@@ -654,7 +646,6 @@ bool FreezeBase::freeze_fast_new_chunk(stackChunkOop chunk) {
   }

   chunk->set_max_thawing_size(cont_size());
-  chunk->set_argsize(_cont.argsize());

   // in a fresh chunk, we freeze *with* the bottom-most frame's stack arguments.
   // They'll then be stored twice: in the chunk and in the parent chunk's top frame

@@ -931,7 +922,6 @@ freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, in
   int overlap = 0; // the args overlap the caller -- if there is one in this chunk and is of the same kind
   int unextended_sp = -1;
   if (chunk != nullptr) {
-    unextended_sp = chunk->sp();
     if (!chunk->is_empty()) {
       StackChunkFrameStream<ChunkFrames::Mixed> last(chunk);
       unextended_sp = chunk->to_offset(StackChunkFrameStream<ChunkFrames::Mixed>(chunk).unextended_sp());

@@ -939,6 +929,8 @@ freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, in
       if (callee.is_interpreted_frame() == top_interpreted) {
         overlap = argsize_md;
       }
+    } else {
+      unextended_sp = chunk->stack_size() - frame::metadata_words_at_top;
     }
   }

@@ -976,25 +968,21 @@ freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, in
     _freeze_size += overlap; // we're allocating a new chunk, so no overlap
     // overlap = 0;

-    chunk = allocate_chunk_slow(_freeze_size);
+    chunk = allocate_chunk_slow(_freeze_size, argsize_md);
     if (chunk == nullptr) {
       return freeze_exception;
     }

     // Install new chunk
     _cont.set_tail(chunk);
-
-    int sp = chunk->stack_size() - argsize_md;
-    chunk->set_sp(sp);
-    chunk->set_argsize(argsize);
-    assert(is_empty(chunk), "");
+    assert(chunk->is_empty(), "");
   } else {
     // REUSE EXISTING CHUNK
     log_develop_trace(continuations)("Reusing chunk mixed: %d empty: %d", chunk->has_mixed_frames(), chunk->is_empty());
     if (chunk->is_empty()) {
       int sp = chunk->stack_size() - argsize_md;
       chunk->set_sp(sp);
-      chunk->set_argsize(argsize);
+      chunk->set_bottom(sp);
       _freeze_size += overlap;
       assert(chunk->max_thawing_size() == 0, "");
     } DEBUG_ONLY(else empty_chunk = false;)

@@ -1004,10 +992,10 @@ freeze_result FreezeBase::finalize_freeze(const frame& callee, frame& caller, in
   chunk->set_has_mixed_frames(true);

   assert(chunk->requires_barriers() == _barriers, "");
-  assert(!_barriers || is_empty(chunk), "");
+  assert(!_barriers || chunk->is_empty(), "");

-  assert(!is_empty(chunk) || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
-  assert(!is_empty(chunk) || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");
+  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).is_done(), "");
+  assert(!chunk->is_empty() || StackChunkFrameStream<ChunkFrames::Mixed>(chunk).to_frame().is_empty(), "");

   // We unwind frames after the last safepoint so that the GC will have found the oops in the frames, but before
   // writing into the chunk. This is so that an asynchronous stack walk (not at a safepoint) that suspends us here

@@ -1053,7 +1041,7 @@ void FreezeBase::patch(const frame& f, frame& hf, const frame& caller, bool is_b
     // If we're the bottom frame, we need to replace the return barrier with the real
     // caller's pc.
     address last_pc = caller.pc();
-    assert((last_pc == nullptr) == is_empty(_cont.tail()), "");
+    assert((last_pc == nullptr) == _cont.tail()->is_empty(), "");
     ContinuationHelper::Frame::patch_pc(caller, last_pc);
   } else {
     assert(!caller.is_empty(), "");

@@ -1306,6 +1294,7 @@ inline bool FreezeBase::stack_overflow() { // detect stack overflow in recursive

 class StackChunkAllocator : public MemAllocator {
   const size_t _stack_size;
+  int _argsize_md;
   ContinuationWrapper& _continuation_wrapper;
   JvmtiSampledObjectAllocEventCollector* const _jvmti_event_collector;
   mutable bool _took_slow_path;
@@ -1321,8 +1310,11 @@ class StackChunkAllocator : public MemAllocator {
     const size_t hs = oopDesc::header_size();
     Copy::fill_to_aligned_words(mem + hs, vmClasses::StackChunk_klass()->size_helper() - hs);

+    int bottom = (int)_stack_size - _argsize_md;
+
     jdk_internal_vm_StackChunk::set_size(mem, (int)_stack_size);
-    jdk_internal_vm_StackChunk::set_sp(mem, (int)_stack_size);
+    jdk_internal_vm_StackChunk::set_bottom(mem, bottom);
+    jdk_internal_vm_StackChunk::set_sp(mem, bottom);

     return finish(mem);
   }
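For orientation, the arithmetic the allocator now performs: with the extra argsize_md argument it places bottom at stack_size - argsize_md and starts sp there, so a freshly allocated chunk satisfies is_empty() immediately. The numbers below are invented; metadata_words_at_top stands in for frame::metadata_words_at_top.

#include <cassert>

int main() {
  const int metadata_words_at_top = 2;                 // placeholder value
  const int cont_argsize          = 4;                 // _cont.argsize()
  const int argsize_md            = cont_argsize + metadata_words_at_top;
  const int stack_size            = 100;               // requested chunk size in words

  // What StackChunkAllocator::initialize does with the new argument:
  const int bottom = stack_size - argsize_md;          // 94
  const int sp     = bottom;                           // sp starts at bottom

  assert(sp == bottom);                                // chunk->is_empty() holds right away
  // The arithmetic used by stackChunkOopDesc::argsize() (valid once the chunk is
  // non-empty) recovers the original value:
  assert(stack_size - bottom - metadata_words_at_top == cont_argsize);
  return 0;
}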
@@ -1346,10 +1338,12 @@ public:
                       size_t word_size,
                       Thread* thread,
                       size_t stack_size,
+                      int argsize_md,
                       ContinuationWrapper& continuation_wrapper,
                       JvmtiSampledObjectAllocEventCollector* jvmti_event_collector)
     : MemAllocator(klass, word_size, thread),
       _stack_size(stack_size),
+      _argsize_md(argsize_md),
       _continuation_wrapper(continuation_wrapper),
       _jvmti_event_collector(jvmti_event_collector),
       _took_slow_path(false) {}

@@ -1383,7 +1377,7 @@ public:
 };

 template <typename ConfigT>
-stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size) {
+stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size, int argsize_md) {
   log_develop_trace(continuations)("allocate_chunk allocating new chunk");

   InstanceStackChunkKlass* klass = InstanceStackChunkKlass::cast(vmClasses::StackChunk_klass());

@@ -1405,7 +1399,7 @@ stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size) {
   // instrumentation have been deferred. This property is important for
   // some GCs, as this ensures that the allocated object is in the young
   // generation / newly allocated memory.
-  StackChunkAllocator allocator(klass, size_in_words, current, stack_size, _cont, _jvmti_event_collector);
+  StackChunkAllocator allocator(klass, size_in_words, current, stack_size, argsize_md, _cont, _jvmti_event_collector);
   stackChunkOop chunk = allocator.allocate();

   if (chunk == nullptr) {

@@ -1415,11 +1409,11 @@ stackChunkOop Freeze<ConfigT>::allocate_chunk(size_t stack_size) {
   // assert that chunk is properly initialized
   assert(chunk->stack_size() == (int)stack_size, "");
   assert(chunk->size() >= stack_size, "chunk->size(): %zu size: %zu", chunk->size(), stack_size);
-  assert(chunk->sp() == chunk->stack_size(), "");
+  assert(chunk->sp() == chunk->bottom(), "");
   assert((intptr_t)chunk->start_address() % 8 == 0, "");
   assert(chunk->max_thawing_size() == 0, "");
   assert(chunk->pc() == nullptr, "");
-  assert(chunk->argsize() == 0, "");
+  assert(chunk->is_empty(), "");
   assert(chunk->flags() == 0, "");
   assert(chunk->is_gc_mode() == false, "");

@@ -1852,12 +1846,11 @@ public:
 };

 inline void ThawBase::clear_chunk(stackChunkOop chunk) {
-  chunk->set_sp(chunk->stack_size());
-  chunk->set_argsize(0);
+  chunk->set_sp(chunk->bottom());
   chunk->set_max_thawing_size(0);
 }

 int ThawBase::remove_top_compiled_frame_from_chunk(stackChunkOop chunk, int &argsize) {
   bool empty = false;
   StackChunkFrameStream<ChunkFrames::CompiledOnly> f(chunk);
   DEBUG_ONLY(intptr_t* const chunk_sp = chunk->start_address() + chunk->sp();)

@@ -2104,8 +2097,7 @@ void ThawBase::finalize_thaw(frame& entry, int argsize) {
     chunk->set_sp(chunk->to_offset(_stream.sp()));
     chunk->set_pc(_stream.pc());
   } else {
-    chunk->set_argsize(0);
-    chunk->set_sp(chunk->stack_size());
+    chunk->set_sp(chunk->bottom());
     chunk->set_pc(nullptr);
   }
   assert(_stream.is_done() == chunk->is_empty(), "");

@@ -2377,7 +2369,6 @@ void ThawBase::finish_thaw(frame& f) {
       chunk->set_has_mixed_frames(false);
     }
     chunk->set_max_thawing_size(0);
-    assert(chunk->argsize() == 0, "");
   } else {
     chunk->set_max_thawing_size(chunk->max_thawing_size() - _align_size);
   }
@@ -84,7 +84,7 @@ int jdk_internal_vm_StackChunk::_parent_offset;
 int jdk_internal_vm_StackChunk::_size_offset;
 int jdk_internal_vm_StackChunk::_sp_offset;
 int jdk_internal_vm_StackChunk::_pc_offset;
-int jdk_internal_vm_StackChunk::_argsize_offset;
+int jdk_internal_vm_StackChunk::_bottom_offset;
 int jdk_internal_vm_StackChunk::_flags_offset;
 int jdk_internal_vm_StackChunk::_maxThawingSize_offset;
 int jdk_internal_vm_StackChunk::_cont_offset;

@@ -93,7 +93,7 @@ int jdk_internal_vm_StackChunk::_cont_offset;
   macro(_parent_offset, k, vmSymbols::parent_name(), stackchunk_signature, false); \
   macro(_size_offset, k, vmSymbols::size_name(), int_signature, false); \
   macro(_sp_offset, k, vmSymbols::sp_name(), int_signature, false); \
-  macro(_argsize_offset, k, vmSymbols::argsize_name(), int_signature, false);
+  macro(_bottom_offset, k, vmSymbols::bottom_name(), int_signature, false);

 void jdk_internal_vm_StackChunk::compute_offsets() {
   InstanceKlass* k = vmClasses::StackChunk_klass();

@@ -83,7 +83,7 @@ class jdk_internal_vm_StackChunk: AllStatic {
   static int _size_offset;
   static int _sp_offset;
   static int _pc_offset;
-  static int _argsize_offset;
+  static int _bottom_offset;
   static int _flags_offset;
   static int _maxThawingSize_offset;
   static int _cont_offset;

@@ -112,8 +112,9 @@ class jdk_internal_vm_StackChunk: AllStatic {
   static inline void set_sp(HeapWord* chunk, int value); // used while allocating
   static inline address pc(oop chunk);
   static inline void set_pc(oop chunk, address value);
-  static inline int argsize(oop chunk);
-  static inline void set_argsize(oop chunk, int value);
+  static inline int bottom(oop chunk);
+  static inline void set_bottom(oop chunk, int value);
+  static inline void set_bottom(HeapWord* chunk, int value);
   static inline uint8_t flags(oop chunk);
   static inline void set_flags(oop chunk, uint8_t value);
   static inline uint8_t flags_acquire(oop chunk);
@@ -115,12 +115,19 @@ inline void jdk_internal_vm_StackChunk::set_size(HeapWord* chunk, int value) {
   *(int*)(((char*)chunk) + _size_offset) = (int)value;
 }

+inline void jdk_internal_vm_StackChunk::set_bottom(HeapWord* chunk, int value) {
+  // Used by StackChunkAllocator before the Object has been finished,
+  // so don't cast too oop and use int_field_put in this function.
+  assert(_bottom_offset != 0, "must be set");
+  *(int*)(((char*)chunk) + _bottom_offset) = (int)value;
+}
+
 inline int jdk_internal_vm_StackChunk::sp(oop chunk) {
-  return chunk->int_field(_sp_offset);
+  return chunk->int_field_relaxed(_sp_offset);
 }

 inline void jdk_internal_vm_StackChunk::set_sp(oop chunk, int value) {
-  chunk->int_field_put(_sp_offset, value);
+  chunk->int_field_put_relaxed(_sp_offset, value);
 }

 inline void jdk_internal_vm_StackChunk::set_sp(HeapWord* chunk, int value) {

@@ -138,12 +145,12 @@ inline void jdk_internal_vm_StackChunk::set_pc(oop chunk, address value) {
   chunk->address_field_put(_pc_offset, value);
 }

-inline int jdk_internal_vm_StackChunk::argsize(oop chunk) {
-  return chunk->int_field(_argsize_offset);
+inline int jdk_internal_vm_StackChunk::bottom(oop chunk) {
+  return chunk->int_field(_bottom_offset);
 }

-inline void jdk_internal_vm_StackChunk::set_argsize(oop chunk, int value) {
-  chunk->int_field_put(_argsize_offset, value);
+inline void jdk_internal_vm_StackChunk::set_bottom(oop chunk, int value) {
+  chunk->int_field_put(_bottom_offset, value);
 }

 inline uint8_t jdk_internal_vm_StackChunk::flags(oop chunk) {

@@ -31,10 +31,10 @@ public final class StackChunk {
     private StackChunk parent;
     private int size; // in words
     private int sp; // in words
-    private int argsize; // bottom stack-passed arguments, in words
+    private int bottom; // in words

     // The stack itself is appended here by the VM, as well as some injected fields

     public StackChunk parent() { return parent; }
-    public boolean isEmpty() { return sp >= (size - argsize); }
+    public boolean isEmpty() { return sp == bottom; }
 }