8315503: G1: Code root scan causes long GC pauses due to imbalanced iteration

Co-authored-by: Ivan Walulya <iwalulya@openjdk.org>
Reviewed-by: iwalulya, ayang
Author: Thomas Schatzl
Date: 2023-10-02 07:35:02 +00:00
parent 207819a05e
commit 795e5dcc85
13 changed files with 268 additions and 164 deletions
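
The imbalance addressed here is that a region's code root set was previously scanned in full by whichever GC worker claimed that region, so one region with a huge set could dominate the pause. The change below stores the set in a ConcurrentHashTable and lets all workers claim buckets of it in small chunks. As a rough standalone sketch of that claiming idea only — ChunkedScanTask, Bucket and scan() are illustrative stand-ins, not the actual HotSpot ConcurrentHashTable::ScanTask API:

#include <algorithm>
#include <atomic>
#include <cstddef>

// Illustrative stand-in for a hash table bucket (chain of nmethod* entries).
struct Bucket { /* ... */ };

class ChunkedScanTask {
  Bucket*             _buckets;
  size_t              _num_buckets;
  size_t              _claim_size;          // cf. BucketClaimSize (16) in the patch
  std::atomic<size_t> _next;                // next unclaimed bucket index

public:
  ChunkedScanTask(Bucket* buckets, size_t num_buckets, size_t claim_size)
    : _buckets(buckets), _num_buckets(num_buckets),
      _claim_size(claim_size), _next(0) {}

  // Called by every GC worker: each call repeatedly claims a small range of
  // buckets from a shared counter until the table is exhausted, so one very
  // large code root set is split across all workers instead of serializing
  // on a single thread.
  template <typename ScanBucket>
  void scan(ScanBucket scan_bucket) {
    for (;;) {
      size_t start = _next.fetch_add(_claim_size, std::memory_order_relaxed);
      if (start >= _num_buckets) {
        return;  // all buckets claimed
      }
      size_t end = std::min(start + _claim_size, _num_buckets);
      for (size_t i = start; i < end; i++) {
        scan_bucket(_buckets[i]);
      }
    }
  }
};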


@@ -28,82 +28,258 @@
#include "code/nmethod.hpp"
#include "gc/g1/g1CodeRootSet.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
void G1CodeRootSet::add(nmethod* nm) {
  assert(_is_iterating == false, "should not mutate while iterating the table");
  bool added = false;
  if (_table == nullptr) {
    _table = new (mtGC) Table(SmallSize, LargeSize);
  }
  added = _table->put(nm, nm);
  if (added && _table->table_size() == SmallSize && length() == Threshold) {
    _table->resize(LargeSize);
  }
}
class G1CodeRootSetHashTableConfig : public StackObj {
public:
  using Value = nmethod*;
  static uintx get_hash(Value const& value, bool* is_dead);
  static void* allocate_node(void* context, size_t size, Value const& value) {
    return AllocateHeap(size, mtGC);
  }
  static void free_node(void* context, void* memory, Value const& value) {
    FreeHeap(memory);
  }
};
// Storage container for the code root set.
class G1CodeRootSetHashTable : public CHeapObj<mtGC> {
using HashTable = ConcurrentHashTable<G1CodeRootSetHashTableConfig, mtGC>;
using HashTableScanTask = HashTable::ScanTask;
// Default (log2) number of buckets; small since typically we do not expect many
// entries.
static const size_t Log2DefaultNumBuckets = 2;
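// Number of buckets a caller claims per chunk when iterating the table (see
// the scan task below).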
static const uint BucketClaimSize = 16;
HashTable _table;
HashTableScanTask _table_scanner;
size_t volatile _num_entries;
bool is_empty() const { return number_of_entries() == 0; }
class HashTableLookUp : public StackObj {
nmethod* _nmethod;
public:
explicit HashTableLookUp(nmethod* nmethod) : _nmethod(nmethod) { }
uintx get_hash() const;
bool equals(nmethod** value);
bool is_dead(nmethod** value) const { return false; }
};
class HashTableIgnore : public StackObj {
public:
HashTableIgnore() { }
void operator()(nmethod** value) { /* do nothing */ }
};
public:
G1CodeRootSetHashTable() :
_table(Log2DefaultNumBuckets,
HashTable::DEFAULT_MAX_SIZE_LOG2),
_table_scanner(&_table, BucketClaimSize), _num_entries(0) {
clear();
}
// Robert Jenkins 1996 & Thomas Wang 1997
// http://web.archive.org/web/20071223173210/http://www.concentric.net/~Ttwang/tech/inthash.htm
static uint32_t hash(uint32_t key) {
key = ~key + (key << 15);
key = key ^ (key >> 12);
key = key + (key << 2);
key = key ^ (key >> 4);
key = key * 2057;
key = key ^ (key >> 16);
return key;
}
static uintx get_hash(nmethod* nmethod) {
uintptr_t value = (uintptr_t)nmethod;
// The CHT only uses the bits smaller than HashTable::DEFAULT_MAX_SIZE_LOG2, so
// try to increase the randomness by incorporating the upper bits of the
// address too.
STATIC_ASSERT(HashTable::DEFAULT_MAX_SIZE_LOG2 <= sizeof(uint32_t) * BitsPerByte);
#ifdef _LP64
return hash((uint32_t)value ^ (uint32_t(value >> 32)));
#else
return hash((uint32_t)value);
#endif
}
void insert(nmethod* method) {
HashTableLookUp lookup(method);
bool grow_hint = false;
bool inserted = _table.insert(Thread::current(), lookup, method, &grow_hint);
if (inserted) {
Atomic::inc(&_num_entries);
}
if (grow_hint) {
_table.grow(Thread::current());
}
}
bool remove(nmethod* method) {
HashTableLookUp lookup(method);
bool removed = _table.remove(Thread::current(), lookup);
if (removed) {
Atomic::dec(&_num_entries);
}
return removed;
}
bool contains(nmethod* method) {
HashTableLookUp lookup(method);
HashTableIgnore ignore;
return _table.get(Thread::current(), lookup, ignore);
}
void clear() {
_table.unsafe_reset();
Atomic::store(&_num_entries, (size_t)0);
}
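// Safepoint iteration. Multiple GC workers may call this for the same set:
// the scan task hands out buckets in claims of BucketClaimSize buckets, so a
// large code root set is split across the callers instead of being scanned
// entirely by a single thread.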
void iterate_at_safepoint(CodeBlobClosure* blk) {
assert_at_safepoint();
// A lot of code root sets are typically empty.
if (is_empty()) {
return;
}
auto do_value =
[&] (nmethod** value) {
blk->do_code_blob(*value);
return true;
};
_table_scanner.do_safepoint_scan(do_value);
}
// Removes entries as indicated by the given EVAL closure.
template <class EVAL>
void clean(EVAL& eval) {
// A lot of code root sets are typically empty.
if (is_empty()) {
return;
}
size_t num_deleted = 0;
auto do_delete =
[&] (nmethod** value) {
num_deleted++;
};
bool succeeded = _table.try_bulk_delete(Thread::current(), eval, do_delete);
guarantee(succeeded, "unable to clean table");
if (num_deleted != 0) {
size_t current_size = Atomic::sub(&_num_entries, num_deleted);
shrink_to_match(current_size);
}
}
// Calculate the log2 of the table size we want to shrink to.
size_t log2_target_shrink_size(size_t current_size) const {
// A table with the new size should be at most filled by this factor. Otherwise
// we would grow again quickly.
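// Example: with 40 remaining entries, being at most half-full needs at least
// ceil(40 / 0.5) = 80 buckets, rounded up to the next power of two, 128, i.e.
// a log2 table size of 7 (subject to the clamp below).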
const float WantedLoadFactor = 0.5;
size_t min_expected_size = checked_cast<size_t>(ceil(current_size / WantedLoadFactor));
size_t result = Log2DefaultNumBuckets;
if (min_expected_size != 0) {
size_t log2_bound = checked_cast<size_t>(log2i_exact(round_up_power_of_2(min_expected_size)));
result = clamp(log2_bound, Log2DefaultNumBuckets, HashTable::DEFAULT_MAX_SIZE_LOG2);
}
return result;
}
// Shrink to keep table size appropriate to the given number of entries.
void shrink_to_match(size_t current_size) {
size_t prev_log2size = _table.get_size_log2(Thread::current());
size_t new_log2_table_size = log2_target_shrink_size(current_size);
if (new_log2_table_size < prev_log2size) {
_table.shrink(Thread::current(), new_log2_table_size);
}
}
void reset_table_scanner() {
_table_scanner.set(&_table, BucketClaimSize);
}
size_t mem_size() { return sizeof(*this) + _table.get_mem_size(Thread::current()); }
size_t number_of_entries() const { return Atomic::load(&_num_entries); }
};
uintx G1CodeRootSetHashTable::HashTableLookUp::get_hash() const {
return G1CodeRootSetHashTable::get_hash(_nmethod);
}
bool G1CodeRootSetHashTable::HashTableLookUp::equals(nmethod** value) {
return *value == _nmethod;
}
uintx G1CodeRootSetHashTableConfig::get_hash(Value const& value, bool* is_dead) {
*is_dead = false;
return G1CodeRootSetHashTable::get_hash(value);
}
size_t G1CodeRootSet::length() const { return _table->number_of_entries(); }
void G1CodeRootSet::add(nmethod* method) {
if (!contains(method)) {
assert(!_is_iterating, "must be");
_table->insert(method);
}
}
G1CodeRootSet::G1CodeRootSet() :
_table(new G1CodeRootSetHashTable())
DEBUG_ONLY(COMMA _is_iterating(false)) { }
G1CodeRootSet::~G1CodeRootSet() {
delete _table;
}
bool G1CodeRootSet::remove(nmethod* method) {
assert(_is_iterating == false, "should not mutate while iterating the table");
bool removed = false;
if (_table != nullptr) {
removed = _table->remove(method);
}
if (removed) {
if (length() == 0) {
clear();
}
}
return removed;
assert(!_is_iterating, "should not mutate while iterating the table");
return _table->remove(method);
}
bool G1CodeRootSet::contains(nmethod* method) {
if (_table != nullptr) {
return _table->contains(method);
}
return false;
return _table->contains(method);
}
void G1CodeRootSet::clear() {
assert(_is_iterating == false, "should not mutate while iterating the table");
delete _table;
_table = nullptr;
assert(!_is_iterating, "should not mutate while iterating the table");
_table->clear();
}
size_t G1CodeRootSet::mem_size() {
return (_table == nullptr)
? sizeof(*this)
: sizeof(*this) + _table->mem_size();
return sizeof(*this) + _table->mem_size();
}
void G1CodeRootSet::reset_table_scanner() {
_table->reset_table_scanner();
}
void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const {
DEBUG_ONLY(_is_iterating = true;)
if (_table != nullptr) {
_table->iterate_all([&](nmethod* nm, nmethod* _) {
blk->do_code_blob(nm);
});
}
_table->iterate_at_safepoint(blk);
DEBUG_ONLY(_is_iterating = false;)
}
class CleanCallback : public StackObj {
NONCOPYABLE(CleanCallback); // can not copy, _blobs will point to old copy
class PointsIntoHRDetectionClosure : public OopClosure {
HeapRegion* _hr;
public:
bool _points_into;
PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}
void do_oop(narrowOop* o) {
do_oop_work(o);
}
void do_oop(oop* o) {
do_oop_work(o);
}
template <typename T>
void do_oop_work(T* p) {
@@ -111,6 +287,14 @@ class CleanCallback : public StackObj {
_points_into = true;
}
}
public:
bool _points_into;
PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {}
void do_oop(narrowOop* o) { do_oop_work(o); }
void do_oop(oop* o) { do_oop_work(o); }
};
PointsIntoHRDetectionClosure _detector;
@@ -119,20 +303,16 @@ class CleanCallback : public StackObj {
public:
CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {}
bool do_entry(nmethod* nm, nmethod* _) {
bool operator()(nmethod** value) {
_detector._points_into = false;
_blobs.do_code_blob(nm);
_blobs.do_code_blob(*value);
return !_detector._points_into;
}
};
void G1CodeRootSet::clean(HeapRegion* owner) {
assert(_is_iterating == false, "should not mutate while iterating the table");
CleanCallback should_clean(owner);
if (_table != nullptr) {
_table->unlink(&should_clean);
}
if (length() == 0) {
clear();
}
assert(!_is_iterating, "should not mutate while iterating the table");
CleanCallback eval(owner);
_table->clean(eval);
}


@@ -27,43 +27,37 @@
#include "code/codeCache.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/resizeableResourceHash.hpp"
class G1CodeRootSetHashTable;
class HeapRegion;
class nmethod;
// Implements storage for a set of code roots.
// This class is not thread safe, locks are needed.
// This class is thread safe.
class G1CodeRootSet {
friend class G1CodeRootSetTest;
friend class G1CodeRootSetTest_g1_code_cache_rem_set_vm_Test;
private:
const static size_t SmallSize = 32;
const static size_t Threshold = 24;
const static size_t LargeSize = 512;
using Table = ResizeableResourceHashtable<nmethod*, nmethod*, AnyObj::C_HEAP, mtGC>;
Table* _table;
G1CodeRootSetHashTable* _table;
DEBUG_ONLY(mutable bool _is_iterating;)
public:
G1CodeRootSet() : _table(nullptr) DEBUG_ONLY(COMMA _is_iterating(false)) {}
G1CodeRootSet();
~G1CodeRootSet();
void add(nmethod* method);
bool remove(nmethod* method);
bool contains(nmethod* method);
void clear();
// Prepare for MT iteration. Must be called before nmethods_do.
void reset_table_scanner();
void nmethods_do(CodeBlobClosure* blk) const;
// Remove all nmethods which no longer contain pointers into our "owner" region
// Remove all nmethods which no longer contain pointers into our "owner" region.
void clean(HeapRegion* owner);
bool is_empty() { return length() == 0;}
// Length in elements
size_t length() const { return _table == nullptr ? 0 : _table->number_of_entries(); }
size_t length() const;
// Memory size in bytes taken by this set.
size_t mem_size();


@@ -3009,8 +3009,7 @@ public:
" starting at " HR_FORMAT,
p2i(_nm), HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()));
// HeapRegion::add_code_root_locked() avoids adding duplicate entries.
hr->add_code_root_locked(_nm);
hr->add_code_root(_nm);
}
}


@@ -1345,7 +1345,7 @@ void G1Policy::abandon_collection_set_candidates() {
// Clear remembered sets of remaining candidate regions and the actual candidate
// set.
for (HeapRegion* r : *candidates()) {
r->rem_set()->clear_locked(true /* only_cardset */);
r->rem_set()->clear(true /* only_cardset */);
}
_collection_set->abandon_all_candidates();
}


@@ -257,7 +257,6 @@ private:
public:
G1RemSetScanState() :
_max_reserved_regions(0),
_collection_set_iter_state(nullptr),
_card_table_scan_state(nullptr),
_scan_chunks_per_region(G1CollectedHeap::get_chunks_per_region()),
_log_scan_chunks_per_region(log2i(_scan_chunks_per_region)),
@@ -270,16 +269,14 @@ public:
}
~G1RemSetScanState() {
FREE_C_HEAP_ARRAY(G1RemsetIterState, _collection_set_iter_state);
FREE_C_HEAP_ARRAY(uint, _card_table_scan_state);
FREE_C_HEAP_ARRAY(bool, _region_scan_chunks);
FREE_C_HEAP_ARRAY(HeapWord*, _scan_top);
}
void initialize(size_t max_reserved_regions) {
assert(_collection_set_iter_state == nullptr, "Must not be initialized twice");
assert(_card_table_scan_state == nullptr, "Must not be initialized twice");
_max_reserved_regions = max_reserved_regions;
_collection_set_iter_state = NEW_C_HEAP_ARRAY(G1RemsetIterState, max_reserved_regions, mtGC);
_card_table_scan_state = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
_num_total_scan_chunks = max_reserved_regions * _scan_chunks_per_region;
_region_scan_chunks = NEW_C_HEAP_ARRAY(bool, _num_total_scan_chunks, mtGC);
@@ -294,7 +291,6 @@ public:
// become used during the collection these values must be valid
// for those regions as well.
for (size_t i = 0; i < _max_reserved_regions; i++) {
reset_region_claim((uint)i);
clear_scan_top((uint)i);
}
@@ -399,20 +395,6 @@ public:
} while (cur != start_pos);
}
void reset_region_claim(uint region_idx) {
_collection_set_iter_state[region_idx] = false;
}
// Attempt to claim the given region in the collection set for iteration. Returns true
// if this call caused the transition from Unclaimed to Claimed.
inline bool claim_collection_set_region(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
if (_collection_set_iter_state[region]) {
return false;
}
return !Atomic::cmpxchg(&_collection_set_iter_state[region], false, true);
}
bool has_cards_to_scan(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return _card_table_scan_state[region] < HeapRegion::CardsPerRegion;
@@ -829,8 +811,6 @@ public:
_rem_set_opt_trim_partially_time() { }
bool do_heap_region(HeapRegion* r) {
uint const region_idx = r->hrm_index();
// The individual references for the optional remembered set are per-worker, so we
// always need to scan them.
if (r->has_index_in_opt_cset()) {
@@ -841,7 +821,8 @@ public:
event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_scan_phase));
}
if (_scan_state->claim_collection_set_region(region_idx)) {
// Scan code root remembered sets.
{
EventGCPhaseParallel event;
G1EvacPhaseWithTrimTimeTracker timer(_pss, _code_root_scan_time, _code_trim_partially_time);
G1ScanAndCountCodeBlobClosure cl(_pss->closures()->weak_codeblobs());
@@ -1212,7 +1193,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
// implicitly rebuild anything else during eager reclaim. Note that at the moment
// (and probably never) we do not enter this path if there are other kind of
// remembered sets for this region.
r->rem_set()->clear_locked(true /* only_cardset */);
r->rem_set()->clear(true /* only_cardset */);
// Clear_locked() above sets the state to Empty. However we want to continue
// collecting remembered set entries for humongous regions that were not
// reclaimed.


@@ -142,7 +142,7 @@ void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
[&] (HeapRegion* r) {
assert(!r->is_continues_humongous() || r->rem_set()->is_empty(),
"Continues humongous region %u remset should be empty", r->hrm_index());
r->rem_set()->clear_locked(true /* only_cardset */);
r->rem_set()->clear(true /* only_cardset */);
});
}
G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();


@@ -105,7 +105,7 @@ void HeapRegion::handle_evacuation_failure(bool retain) {
move_to_old();
_rem_set->clean_code_roots(this);
_rem_set->clear_locked(true /* only_cardset */, retain /* keep_tracked */);
_rem_set->clear(true /* only_cardset */, retain /* keep_tracked */);
}
void HeapRegion::unlink_from_list() {
@@ -122,7 +122,7 @@ void HeapRegion::hr_clear(bool clear_space) {
set_free();
reset_pre_dummy_top();
rem_set()->clear_locked();
rem_set()->clear();
init_top_at_mark_start();
if (clear_space) clear(SpaceDecorator::Mangle);
@@ -205,7 +205,7 @@ void HeapRegion::clear_humongous() {
}
void HeapRegion::prepare_remset_for_scan() {
return _rem_set->reset_table_scanner();
_rem_set->reset_table_scanner();
}
HeapRegion::HeapRegion(uint hrm_index,
@@ -275,24 +275,15 @@ void HeapRegion::note_self_forward_chunk_done(size_t garbage_bytes) {
// Code roots support
void HeapRegion::add_code_root(nmethod* nm) {
HeapRegionRemSet* hrrs = rem_set();
hrrs->add_code_root(nm);
}
void HeapRegion::add_code_root_locked(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
HeapRegionRemSet* hrrs = rem_set();
hrrs->add_code_root_locked(nm);
rem_set()->add_code_root(nm);
}
void HeapRegion::remove_code_root(nmethod* nm) {
HeapRegionRemSet* hrrs = rem_set();
hrrs->remove_code_root(nm);
rem_set()->remove_code_root(nm);
}
void HeapRegion::code_roots_do(CodeBlobClosure* blk) const {
HeapRegionRemSet* hrrs = rem_set();
hrrs->code_roots_do(blk);
rem_set()->code_roots_do(blk);
}
class VerifyCodeRootOopClosure: public OopClosure {


@@ -544,7 +544,6 @@ public:
// Routines for managing a list of code roots (attached to the
// this region's RSet) that point into this heap region.
void add_code_root(nmethod* nm);
void add_code_root_locked(nmethod* nm);
void remove_code_root(nmethod* nm);
// Applies blk->do_code_blob() to each of the entries in


@@ -57,7 +57,6 @@ void HeapRegionRemSet::initialize(MemRegion reserved) {
HeapRegionRemSet::HeapRegionRemSet(HeapRegion* hr,
G1CardSetConfiguration* config) :
_m(Mutex::service - 1, FormatBuffer<128>("HeapRegionRemSet#%u_lock", hr->hrm_index())),
_code_roots(),
_card_set_mm(config, G1CollectedHeap::heap()->card_set_freelist_pool()),
_card_set(config, &_card_set_mm),
@@ -68,12 +67,7 @@ void HeapRegionRemSet::clear_fcc() {
G1FromCardCache::clear(_hr->hrm_index());
}
void HeapRegionRemSet::clear(bool only_cardset) {
MutexLocker x(&_m, Mutex::_no_safepoint_check_flag);
clear_locked(only_cardset);
}
void HeapRegionRemSet::clear_locked(bool only_cardset, bool keep_tracked) {
void HeapRegionRemSet::clear(bool only_cardset, bool keep_tracked) {
if (!only_cardset) {
_code_roots.clear();
}
@@ -88,6 +82,7 @@ void HeapRegionRemSet::clear_locked(bool only_cardset, bool keep_tracked) {
}
void HeapRegionRemSet::reset_table_scanner() {
_code_roots.reset_table_scanner();
_card_set.reset_table_scanner();
}
@@ -108,33 +103,12 @@ void HeapRegionRemSet::print_static_mem_size(outputStream* out) {
void HeapRegionRemSet::add_code_root(nmethod* nm) {
assert(nm != nullptr, "sanity");
assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
"should call add_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
MutexLocker ml(&_m, Mutex::_no_safepoint_check_flag);
add_code_root_locked(nm);
}
void HeapRegionRemSet::add_code_root_locked(nmethod* nm) {
assert(nm != nullptr, "sanity");
assert((CodeCache_lock->owned_by_self() ||
(SafepointSynchronize::is_at_safepoint() &&
(_m.owned_by_self() || Thread::current()->is_VM_thread()))),
"not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread()));
if (!_code_roots.contains(nm)) { // with this test, we can assert that we do not modify the hash table while iterating over it
_code_roots.add(nm);
}
_code_roots.add(nm);
}
void HeapRegionRemSet::remove_code_root(nmethod* nm) {
assert(nm != nullptr, "sanity");
assert_locked_or_safepoint(CodeCache_lock);
ConditionalMutexLocker ml(&_m, !CodeCache_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
_code_roots.remove(nm);
// Check that there were no duplicates


@@ -40,7 +40,6 @@ class outputStream;
class HeapRegionRemSet : public CHeapObj<mtGC> {
friend class VMStructs;
Mutex _m;
// A set of code blobs (nmethods) whose code contains pointers into
// the region that owns this RSet.
G1CodeRootSet _code_roots;
@@ -117,8 +116,7 @@ public:
// The region is being reclaimed; clear its remset, and any mention of
// entries for this region in other remsets.
void clear(bool only_cardset = false);
void clear_locked(bool only_cardset = false, bool keep_tracked = false);
void clear(bool only_cardset = false, bool keep_tracked = false);
void reset_table_scanner();
@@ -167,7 +165,6 @@ public:
// Returns true if the code roots contains the given
// nmethod.
bool code_roots_list_contains(nmethod* nm) {
MutexLocker ml(&_m, Mutex::_no_safepoint_check_flag);
return _code_roots.contains(nm);
}


@@ -231,7 +231,7 @@ void mutex_init() {
MUTEX_DEFN(StringDedupIntern_lock , PaddedMutex , nosafepoint);
MUTEX_DEFN(RawMonitor_lock , PaddedMutex , nosafepoint-1);
MUTEX_DEFN(Metaspace_lock , PaddedMutex , nosafepoint-3);
MUTEX_DEFN(Metaspace_lock , PaddedMutex , nosafepoint-4);
MUTEX_DEFN(MetaspaceCritical_lock , PaddedMonitor, nosafepoint-1);
MUTEX_DEFN(Patching_lock , PaddedMutex , nosafepoint); // used for safepointing and code patching.
@@ -302,9 +302,9 @@ void mutex_init() {
MUTEX_DEFN(UnsafeJlong_lock , PaddedMutex , nosafepoint);
#endif
MUTEX_DEFN(ContinuationRelativize_lock , PaddedMonitor, nosafepoint-3);
MUTEX_DEFN(ContinuationRelativize_lock , PaddedMonitor, nosafepoint-4);
MUTEX_DEFN(CodeHeapStateAnalytics_lock , PaddedMutex , safepoint);
MUTEX_DEFN(ThreadsSMRDelete_lock , PaddedMonitor, nosafepoint-3); // Holds ConcurrentHashTableResize_lock
MUTEX_DEFN(ThreadsSMRDelete_lock , PaddedMonitor, nosafepoint-4); // Holds ConcurrentHashTableResize_lock
MUTEX_DEFN(ThreadIdTableCreate_lock , PaddedMutex , safepoint);
MUTEX_DEFN(SharedDecoder_lock , PaddedMutex , tty-1);
MUTEX_DEFN(DCmdFactory_lock , PaddedMutex , nosafepoint);


@@ -1024,7 +1024,7 @@ ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, bo
_stats_rate = nullptr;
}
_resize_lock =
new Mutex(Mutex::nosafepoint-2, "ConcurrentHashTableResize_lock");
new Mutex(Mutex::nosafepoint-3, "ConcurrentHashTableResize_lock");
_table = new InternalTable(log2size);
assert(log2size_limit >= log2size, "bad ergo");
_size_limit_reached = _table->_log2_size == _log2_size_limit;


@@ -25,15 +25,7 @@
#include "gc/g1/g1CodeRootSet.hpp"
#include "unittest.hpp"
class G1CodeRootSetTest : public ::testing::Test {
public:
size_t threshold() {
return G1CodeRootSet::Threshold;
}
};
TEST_VM_F(G1CodeRootSetTest, g1_code_cache_rem_set) {
TEST_VM(G1CodeRootSet, g1_code_cache_rem_set) {
G1CodeRootSet root_set;
ASSERT_TRUE(root_set.is_empty()) << "Code root set must be initially empty "
@@ -43,7 +35,7 @@ TEST_VM_F(G1CodeRootSetTest, g1_code_cache_rem_set) {
ASSERT_EQ(root_set.length(), (size_t) 1) << "Added exactly one element, but"
" set contains " << root_set.length() << " elements";
const size_t num_to_add = (size_t) threshold() + 1;
const size_t num_to_add = 1000;
for (size_t i = 1; i <= num_to_add; i++) {
root_set.add((nmethod*) 1);
@@ -60,9 +52,6 @@ TEST_VM_F(G1CodeRootSetTest, g1_code_cache_rem_set) {
<< "After adding in total " << num_to_add << " distinct code roots, "
"they need to be in the set, but there are only " << root_set.length();
ASSERT_EQ(root_set._table->table_size(), 512u)
<< "should have grown to large hashtable";
size_t num_popped = 0;
for (size_t i = 1; i <= num_to_add; i++) {
bool removed = root_set.remove((nmethod*) i);
@@ -76,5 +65,5 @@ TEST_VM_F(G1CodeRootSetTest, g1_code_cache_rem_set) {
<< "Managed to pop " << num_popped << " code roots, but only "
<< num_to_add << " were added";
ASSERT_EQ(root_set.length(), 0u)
<< "should have grown to large hashtable";
<< "should be empty";
}