This repository has been archived by the owner on Oct 23, 2024. It is now read-only.

Merge upstream-jdk
corretto-github-robot committed Apr 22, 2024
2 parents f240177 + 5f333b5 commit 6fdd2c8
Showing 8 changed files with 109 additions and 33 deletions.
13 changes: 6 additions & 7 deletions src/hotspot/share/gc/g1/g1YoungCollector.cpp
@@ -484,13 +484,8 @@ void G1YoungCollector::set_young_collection_default_active_worker_threads(){
}

void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) {

// Must be before collection set calculation, requires collection set to not
// be calculated yet.
if (collector_state()->in_concurrent_start_gc()) {
concurrent_mark()->pre_concurrent_start(_gc_cause);
}

// Flush various data in thread-local buffers to be able to determine the collection
// set
{
Ticks start = Ticks::now();
G1PreEvacuateCollectionSetBatchTask cl;
@@ -501,6 +496,10 @@ void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info)
// Needs log buffers flushed.
calculate_collection_set(evacuation_info, policy()->max_pause_time_ms());

if (collector_state()->in_concurrent_start_gc()) {
concurrent_mark()->pre_concurrent_start(_gc_cause);
}

// Please see comment in g1CollectedHeap.hpp and
// G1CollectedHeap::ref_processing_init() to see how
// reference processing currently works in G1.
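This change reorders pre_evacuate_collection_set() so that concurrent-start preparation runs only after the collection set has been calculated, rather than before it as the old comment required. A stubbed sketch of the resulting call order (stand-in names, not the real G1 code):

// Stubbed illustration of the ordering this change establishes.
void flush_thread_local_buffers() { /* retire TLABs, flush logs */ }
void calculate_collection_set()   { /* may drop retained candidate regions */ }
void pre_concurrent_start()       { /* sets TAMS via note_start_of_marking() */ }

void pre_evacuate_collection_set(bool in_concurrent_start_gc) {
  flush_thread_local_buffers();   // must be first: flushed log buffers feed the policy
  calculate_collection_set();     // finalizes which regions will be evacuated
  if (in_concurrent_start_gc) {
    pre_concurrent_start();       // now sees the final collection set state
  }
}
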
1 change: 1 addition & 0 deletions src/hotspot/share/gc/g1/g1YoungGCPreEvacuateTasks.hpp
@@ -29,6 +29,7 @@

// Set of pre evacuate collection set tasks containing ("s" means serial):
// - Retire TLAB and Flush Logs (Java threads)
// - Flush pin count cache (Java threads)
// - Flush Logs (s) (Non-Java threads)
class G1PreEvacuateCollectionSetBatchTask : public G1BatchedTask {
class JavaThreadRetireTLABAndFlushLogs;
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/g1/heapRegion.inline.hpp
@@ -289,7 +289,7 @@ inline void HeapRegion::reset_parsable_bottom() {

inline void HeapRegion::note_start_of_marking() {
assert(top_at_mark_start() == bottom(), "Region's TAMS must always be at bottom");
if (is_old_or_humongous() && !is_collection_set_candidate()) {
if (is_old_or_humongous() && !is_collection_set_candidate() && !in_collection_set()) {
set_top_at_mark_start(top());
}
}
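With the added !in_collection_set() clause, regions already chosen for evacuation no longer have their TAMS moved to top() at the start of marking; only regions that survive in place do. The condition, written as a stand-alone predicate (a hypothetical helper, for illustration only):

// Only old or humongous regions that are neither collection set candidates
// nor already in the collection set keep their objects in place, so only
// they get TAMS moved up to top() when marking starts.
bool should_move_tams_to_top(bool is_old_or_humongous,
                             bool is_cset_candidate,
                             bool in_collection_set) {
  return is_old_or_humongous && !is_cset_candidate && !in_collection_set;
}
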
16 changes: 8 additions & 8 deletions src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleCheckpoint.cpp
@@ -214,9 +214,6 @@ class StackTraceBlobInstaller {
StackTraceBlobInstaller() : _cache(JfrOptionSet::old_object_queue_size()) {
prepare_for_resolution();
}
~StackTraceBlobInstaller() {
JfrStackTraceRepository::clear_leak_profiler();
}
void sample_do(ObjectSample* sample) {
if (stack_trace_precondition(sample)) {
install(sample);
@@ -270,11 +267,14 @@ void ObjectSampleCheckpoint::on_rotation(const ObjectSampler* sampler) {
assert(LeakProfiler::is_running(), "invariant");
JavaThread* const thread = JavaThread::current();
DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_native(thread);)
// can safepoint here
ThreadInVMfromNative transition(thread);
MutexLocker lock(ClassLoaderDataGraph_lock);
// the lock is needed to ensure the unload lists do not grow in the middle of inspection.
install_stack_traces(sampler);
{
// can safepoint here
ThreadInVMfromNative transition(thread);
MutexLocker lock(ClassLoaderDataGraph_lock);
// the lock is needed to ensure the unload lists do not grow in the middle of inspection.
install_stack_traces(sampler);
}
JfrStackTraceRepository::clear_leak_profiler();
}

static bool is_klass_unloaded(traceid klass_id) {
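The rewrite limits the thread-state transition and ClassLoaderDataGraph_lock to an inner block, so clear_leak_profiler() (previously hidden in the StackTraceBlobInstaller destructor) runs after the lock is released. A minimal sketch of that scoping pattern, using std::mutex in place of HotSpot's MutexLocker and stand-in function names:

#include <mutex>

std::mutex graph_lock;                 // stands in for ClassLoaderDataGraph_lock

void install_stack_traces() { /* inspect unload lists, install blobs */ }
void clear_leak_profiler()  { /* release cached stack traces */ }

void on_rotation() {
  {
    // Hold the lock only while inspecting shared state.
    std::lock_guard<std::mutex> lock(graph_lock);
    install_stack_traces();
  }                                    // lock released at end of block
  clear_leak_profiler();               // cleanup no longer runs under the lock
}
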
4 changes: 1 addition & 3 deletions src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp
@@ -573,9 +573,7 @@ void JfrRecorderService::pre_safepoint_write() {
ObjectSampleCheckpoint::on_rotation(ObjectSampler::acquire());
}
write_storage(_storage, _chunkwriter);
if (_stack_trace_repository.is_modified()) {
write_stacktrace(_stack_trace_repository, _chunkwriter, false);
}
write_stacktrace(_stack_trace_repository, _chunkwriter, true);
}

void JfrRecorderService::invoke_safepoint_write() {
3 changes: 1 addition & 2 deletions src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp
@@ -98,11 +98,10 @@ bool JfrStackTraceRepository::is_modified() const {
}

size_t JfrStackTraceRepository::write(JfrChunkWriter& sw, bool clear) {
MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
if (_entries == 0) {
return 0;
}
MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
assert(_entries > 0, "invariant");
int count = 0;
for (u4 i = 0; i < TABLE_SIZE; ++i) {
JfrStackTrace* stacktrace = _table[i];
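Taking JfrStacktrace_lock before the _entries check matters now that write() is called unconditionally: the emptiness test itself reads shared state, so it moves under the lock, and the now-redundant assert goes away. A generic check-under-lock sketch with std::mutex (hypothetical names):

#include <cstddef>
#include <mutex>

std::mutex repo_lock;
size_t entries = 0;

size_t write_entries() {
  // Lock first: reading 'entries' without the lock would race with
  // concurrent insertions.
  std::lock_guard<std::mutex> lock(repo_lock);
  if (entries == 0) {
    return 0;                 // nothing to write; checked safely under the lock
  }
  // ... serialize and optionally clear the table under the same lock ...
  return entries;
}
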
31 changes: 19 additions & 12 deletions src/hotspot/share/utilities/concurrentHashTable.inline.hpp
@@ -1223,23 +1223,30 @@ template <typename VALUE_SIZE_FUNC>
inline TableStatistics ConcurrentHashTable<CONFIG, F>::
statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
{
constexpr size_t batch_size = 128;
NumberSeq summary;
size_t literal_bytes = 0;
InternalTable* table = get_table();
for (size_t bucket_it = 0; bucket_it < table->_size; bucket_it++) {
size_t num_batches = table->_size / batch_size;
for (size_t batch_start = 0; batch_start < table->_size; batch_start += batch_size) {
// We batch the use of ScopedCS here as it has been found to be quite expensive to
// invoke it for every single bucket.
size_t batch_end = MIN2(batch_start + batch_size, table->_size);
ScopedCS cs(thread, this);
size_t count = 0;
Bucket* bucket = table->get_bucket(bucket_it);
if (bucket->have_redirect() || bucket->is_locked()) {
continue;
}
Node* current_node = bucket->first();
while (current_node != nullptr) {
++count;
literal_bytes += vs_f(current_node->value());
current_node = current_node->next();
for (size_t bucket_it = batch_start; bucket_it < batch_end; bucket_it++) {
size_t count = 0;
Bucket* bucket = table->get_bucket(bucket_it);
if (bucket->have_redirect() || bucket->is_locked()) {
continue;
}
Node* current_node = bucket->first();
while (current_node != nullptr) {
++count;
literal_bytes += vs_f(current_node->value());
current_node = current_node->next();
}
summary.add((double)count);
}
summary.add((double)count);
}

if (_stats_rate == nullptr) {
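The loop now enters the ScopedCS critical section once per batch of 128 buckets instead of once per bucket, amortizing the enter/leave cost across the batch. A self-contained sketch of the same batching pattern (dummy guard type, not HotSpot's ScopedCS):

#include <algorithm>
#include <cstddef>
#include <vector>

// Dummy stand-in for an expensive enter/leave critical section.
struct ScopedCriticalSection {
  ScopedCriticalSection()  { /* enter */ }
  ~ScopedCriticalSection() { /* leave */ }
};

size_t count_entries(const std::vector<std::vector<int>>& buckets) {
  constexpr size_t batch_size = 128;
  size_t total = 0;
  for (size_t batch_start = 0; batch_start < buckets.size(); batch_start += batch_size) {
    size_t batch_end = std::min(batch_start + batch_size, buckets.size());
    ScopedCriticalSection cs;          // one enter/leave per batch, not per bucket
    for (size_t i = batch_start; i < batch_end; i++) {
      total += buckets[i].size();      // per-bucket work done inside the section
    }
  }
  return total;
}
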
72 changes: 72 additions & 0 deletions test/hotspot/jtreg/gc/g1/pinnedobjs/TestDroppedRetainedTAMS.java
@@ -0,0 +1,72 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/

/* @test
* @summary Check that TAMSes are correctly updated for regions dropped from
* the retained collection set candidates during a Concurrent Start pause.
* @requires vm.gc.G1
* @requires vm.flagless
* @library /test/lib
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -XX:+UseG1GC -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions
-XX:+WhiteBoxAPI -Xbootclasspath/a:. -Xmx32m -XX:G1NumCollectionsKeepPinned=1
-XX:+VerifyBeforeGC -XX:+VerifyAfterGC -XX:G1MixedGCLiveThresholdPercent=100
-XX:G1HeapWastePercent=0 -Xlog:gc,gc+ergo+cset=trace gc.g1.pinnedobjs.TestDroppedRetainedTAMS
*/

package gc.g1.pinnedobjs;

import jdk.test.whitebox.WhiteBox;

public class TestDroppedRetainedTAMS {

private static final WhiteBox wb = WhiteBox.getWhiteBox();

private static final char[] dummy = new char[100];

public static void main(String[] args) {
wb.fullGC(); // Move the target dummy object to old gen.

wb.pinObject(dummy);

// After this concurrent cycle the pinned region will be in the (marking)
// collection set candidates.
wb.g1RunConcurrentGC();

// Get past the Prepare Mixed GC, which does nothing with the marking
// candidates.
wb.youngGC();
// Mixed GC. It will complete; the pinned region is now retained. The mixed
// GCs end here.
wb.youngGC();

// The pinned region will be dropped from the retained candidates during the
// Concurrent Start GC, leaving that region's TAMS broken.
wb.g1RunConcurrentGC();

// Verification will find a lot of broken objects.
wb.youngGC();
System.out.println(dummy);
}
}
