This repository has been archived by the owner on Oct 23, 2024. It is now read-only.

Commit

Merge upstream-jdk
corretto-github-robot committed Dec 7, 2023
2 parents 07efe2f + 632a3c5 commit 65a9ab1
Showing 28 changed files with 363 additions and 331 deletions.
142 changes: 68 additions & 74 deletions src/hotspot/share/gc/parallel/objectStartArray.cpp
@@ -25,32 +25,21 @@
 #include "precompiled.hpp"
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/shared/cardTableBarrierSet.hpp"
-#include "memory/allocation.inline.hpp"
 #include "nmt/memTracker.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "utilities/align.hpp"
 
-uint ObjectStartArray::_card_shift = 0;
-uint ObjectStartArray::_card_size = 0;
-uint ObjectStartArray::_card_size_in_words = 0;
+static size_t num_bytes_required(MemRegion mr) {
+  assert(CardTable::is_card_aligned(mr.start()), "precondition");
+  assert(CardTable::is_card_aligned(mr.end()), "precondition");
 
-void ObjectStartArray::initialize_block_size(uint card_shift) {
-  _card_shift = card_shift;
-  _card_size = 1 << _card_shift;
-  _card_size_in_words = _card_size / sizeof(HeapWord);
+  return mr.word_size() / BOTConstants::card_size_in_words();
 }
 
 void ObjectStartArray::initialize(MemRegion reserved_region) {
-  // We're based on the assumption that we use the same
-  // size blocks as the card table.
-  assert(_card_size == CardTable::card_size(), "Sanity");
-  assert(_card_size <= MaxBlockSize, "block_size must be less than or equal to " UINT32_FORMAT, MaxBlockSize);
-
   // Calculate how much space must be reserved
-  _reserved_region = reserved_region;
-
-  size_t bytes_to_reserve = reserved_region.word_size() / _card_size_in_words;
+  size_t bytes_to_reserve = num_bytes_required(reserved_region);
   assert(bytes_to_reserve > 0, "Sanity");
 
   bytes_to_reserve =
@@ -62,91 +51,96 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {
   if (!backing_store.is_reserved()) {
     vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
   }
-  MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);
+  MemTracker::record_virtual_memory_type(backing_store.base(), mtGC);
 
   // We do not commit any memory initially
   _virtual_space.initialize(backing_store);
 
-  _raw_base = (jbyte*)_virtual_space.low_boundary();
-  assert(_raw_base != nullptr, "set from the backing_store");
+  assert(_virtual_space.low_boundary() != nullptr, "set from the backing_store");
 
-  _offset_base = _raw_base - (size_t(reserved_region.start()) >> _card_shift);
-
-  _covered_region.set_start(reserved_region.start());
-  _covered_region.set_word_size(0);
-
-  _blocks_region.set_start((HeapWord*)_raw_base);
-  _blocks_region.set_word_size(0);
+  _offset_base = (uint8_t*)(_virtual_space.low_boundary() - (uintptr_t(reserved_region.start()) >> BOTConstants::log_card_size()));
 }
 
 void ObjectStartArray::set_covered_region(MemRegion mr) {
-  assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
-  assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");
+  DEBUG_ONLY(_covered_region = mr;)
 
-  HeapWord* low_bound = mr.start();
-  HeapWord* high_bound = mr.end();
-  assert((uintptr_t(low_bound) & (_card_size - 1)) == 0, "heap must start at block boundary");
-  assert((uintptr_t(high_bound) & (_card_size - 1)) == 0, "heap must end at block boundary");
-
-  size_t requested_blocks_size_in_bytes = mr.word_size() / _card_size_in_words;
-
+  size_t requested_size = num_bytes_required(mr);
   // Only commit memory in page sized chunks
-  requested_blocks_size_in_bytes =
-    align_up(requested_blocks_size_in_bytes, os::vm_page_size());
+  requested_size = align_up(requested_size, os::vm_page_size());
 
-  _covered_region = mr;
+  size_t current_size = _virtual_space.committed_size();
 
-  size_t current_blocks_size_in_bytes = _blocks_region.byte_size();
+  if (requested_size == current_size) {
+    return;
+  }
 
-  if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
+  if (requested_size > current_size) {
     // Expand
-    size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
+    size_t expand_by = requested_size - current_size;
     if (!_virtual_space.expand_by(expand_by)) {
       vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
     }
-    // Clear *only* the newly allocated region
-    memset(_blocks_region.end(), clean_block, expand_by);
-  }
-
-  if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
+  } else {
     // Shrink
-    size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
+    size_t shrink_by = current_size - requested_size;
     _virtual_space.shrink_by(shrink_by);
   }
-
-  _blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));
-
-  assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word sized increment");
-  assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
-  assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
-  assert(block_for_addr(high_bound-1) <= &_raw_base[_blocks_region.byte_size()-1], "Checking end of map");
 }
 
-void ObjectStartArray::reset() {
-  memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
+static void fill_range(uint8_t* start, uint8_t* end, uint8_t v) {
+  // + 1 for inclusive
+  memset(start, v, pointer_delta(end, start, sizeof(uint8_t)) + 1);
 }
 
-bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
-                                              HeapWord* end_addr) const {
-  assert(start_addr <= end_addr,
-         "Range is wrong. start_addr (" PTR_FORMAT ") is after end_addr (" PTR_FORMAT ")",
-         p2i(start_addr), p2i(end_addr));
-
-  assert(is_aligned(start_addr, _card_size), "precondition");
-
-  if (start_addr == end_addr) {
-    // No objects in empty range.
-    return false;
-  }
+void ObjectStartArray::update_for_block_work(HeapWord* blk_start,
+                                             HeapWord* blk_end) {
+  HeapWord* const cur_card_boundary = align_up_by_card_size(blk_start);
+  uint8_t* const offset_entry = entry_for_addr(cur_card_boundary);
 
-  jbyte* start_block = block_for_addr(start_addr);
-  jbyte* end_block = block_for_addr(end_addr - 1);
+  // The first card holds the actual offset.
+  *offset_entry = checked_cast<uint8_t>(pointer_delta(cur_card_boundary, blk_start));
 
-  for (jbyte* block = start_block; block <= end_block; block++) {
-    if (*block != clean_block) {
-      return true;
+  // Check if this block spans over other cards.
+  uint8_t* const end_entry = entry_for_addr(blk_end - 1);
+  assert(offset_entry <= end_entry, "inv");
+
+  if (offset_entry != end_entry) {
+    // Handling remaining entries.
+    uint8_t* start_entry_for_region = offset_entry + 1;
+    for (uint i = 0; i < BOTConstants::N_powers; i++) {
+      // -1 so that the reach ends in this region and not at the start
+      // of the next.
+      uint8_t* reach = offset_entry + BOTConstants::power_to_cards_back(i + 1) - 1;
+      uint8_t value = checked_cast<uint8_t>(BOTConstants::card_size_in_words() + i);
+
+      fill_range(start_entry_for_region, MIN2(reach, end_entry), value);
+      start_entry_for_region = reach + 1;
+
+      if (reach >= end_entry) {
+        break;
+      }
     }
+    assert(start_entry_for_region > end_entry, "Sanity check");
   }
 
-  return false;
+  debug_only(verify_for_block(blk_start, blk_end);)
+}
+
+void ObjectStartArray::verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const {
+  assert(is_crossing_card_boundary(blk_start, blk_end), "precondition");
+
+  const uint8_t* const start_entry = entry_for_addr(align_up_by_card_size(blk_start));
+  const uint8_t* const end_entry = entry_for_addr(blk_end - 1);
+  // Check entries in [start_entry, end_entry]
+  assert(*start_entry < BOTConstants::card_size_in_words(), "offset entry");
+
+  for (const uint8_t* i = start_entry + 1; i <= end_entry; ++i) {
+    const uint8_t prev = *(i-1);
+    const uint8_t value = *i;
+    if (prev != value) {
+      assert(value >= prev, "monotonic");
+      size_t n_cards_back = BOTConstants::entry_to_cards_back(value);
+      assert(start_entry == (i - n_cards_back), "inv");
+    }
+  }
 }
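
Note: update_for_block_work() writes a classic block-offset-table (BOT) encoding. The first card a block crosses stores the direct word offset back to the block start; every further card stores a "backskip" value drawn from one of BOTConstants::N_powers exponentially growing bands. The standalone sketch below simulates that encoding and its lookup over a plain byte array. It is illustrative only: the constants (kCardWords, kLogBase, kNPowers) and the names (StartArraySketch and its members) are assumed stand-ins, not the real BOTConstants values.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Sketch-only stand-ins for BOTConstants; values are assumptions.
constexpr size_t   kCardWords = 64; // entries 0..63 are direct word offsets
constexpr unsigned kLogBase   = 4;  // each backskip band covers 16x more cards
constexpr unsigned kNPowers   = 14;

static size_t power_to_cards_back(unsigned i) { return size_t(1) << (kLogBase * i); }

static size_t entry_to_cards_back(uint8_t e) {
  assert(e >= kCardWords);
  return power_to_cards_back(e - kCardWords);
}

struct StartArraySketch {
  std::vector<uint8_t> entries; // one byte per card

  explicit StartArraySketch(size_t num_cards) : entries(num_cards, 0) {}

  // Record a block [blk_start, blk_end), in word indices from the heap base.
  void update_for_block(size_t blk_start, size_t blk_end) {
    size_t first_card = (blk_start + kCardWords - 1) / kCardWords; // align up
    if (blk_end <= first_card * kCardWords) {
      return; // block does not cross a card boundary; nothing to record
    }
    // The first crossed card holds the direct offset back to the block start.
    entries[first_card] = uint8_t(first_card * kCardWords - blk_start);

    // Remaining cards get banded backskip values, mirroring the
    // fill_range() loop in update_for_block_work().
    size_t end_card = (blk_end - 1) / kCardWords;
    size_t cur = first_card + 1;
    for (unsigned i = 0; i < kNPowers && cur <= end_card; i++) {
      size_t reach = first_card + power_to_cards_back(i + 1) - 1;
      for (; cur <= std::min(reach, end_card); cur++) {
        entries[cur] = uint8_t(kCardWords + i);
      }
    }
  }

  // Word index of the start of the block reaching into `card`.
  size_t block_start_reaching_into_card(size_t card) const {
    while (entries[card] >= kCardWords) {
      card -= entry_to_cards_back(entries[card]); // follow backskips
    }
    return card * kCardWords - entries[card];     // apply the direct offset
  }
};

int main() {
  StartArraySketch bot(1024);
  bot.update_for_block(100, 40000); // one block spanning hundreds of cards
  assert(bot.block_start_reaching_into_card(600) == 100);
  printf("block start: %zu\n", bot.block_start_reaching_into_card(600));
  return 0;
}

Because each backskip jumps back by at least its band's minimum distance, a lookup needs only a small, bounded number of hops regardless of how many cards a block spans. The banding also explains verify_for_block() above: entry values are non-decreasing across a block's cards, and at each band transition the encoded backskip points exactly back at the first, direct-offset entry.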
137 changes: 31 additions & 106 deletions src/hotspot/share/gc/parallel/objectStartArray.hpp
@@ -26,6 +26,7 @@
 #define SHARE_GC_PARALLEL_OBJECTSTARTARRAY_HPP
 
 #include "gc/parallel/psVirtualspace.hpp"
+#include "gc/shared/blockOffsetTable.hpp"
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
 #include "oops/oop.hpp"
@@ -36,141 +37,65 @@
 //
 
 class ObjectStartArray : public CHeapObj<mtGC> {
-  friend class VerifyObjectStartArrayClosure;
-
 private:
-  PSVirtualSpace _virtual_space;
-  MemRegion _reserved_region;
-  MemRegion _covered_region;
-  MemRegion _blocks_region;
-  jbyte* _raw_base;
-  jbyte* _offset_base;
-
-  static uint _card_shift;
-  static uint _card_size;
-  static uint _card_size_in_words;
-
-public:
-
-  enum BlockValueConstants {
-    clean_block = -1
-  };
-
-  // Maximum size an offset table entry can cover. This maximum is derived from that
-  // we need an extra bit for possible offsets in the byte for backskip values, leaving 2^7 possible offsets.
-  // Minimum object alignment is 8 bytes (2^3), so we can at most represent 2^10 offsets within a BOT value.
-  static const uint MaxBlockSize = 1024;
-
-  // Initialize block size based on card size
-  static void initialize_block_size(uint card_shift);
-
-  static uint card_shift() {
-    return _card_shift;
-  }
-
-  static uint card_size() {
-    return _card_size;
-  }
-  static uint card_size_in_words() {
-    return _card_size_in_words;
-  }
+  // The committed (old-gen heap) virtual space this object-start-array covers.
+  DEBUG_ONLY(MemRegion _covered_region;)
 
-protected:
+  // BOT array
+  PSVirtualSpace _virtual_space;
 
+  // Biased array-start of BOT array for fast heap-addr / BOT entry translation
+  uint8_t* _offset_base;
+
   // Mapping from address to object start array entry
-  jbyte* block_for_addr(void* p) const {
+  uint8_t* entry_for_addr(const void* const p) const {
     assert(_covered_region.contains(p),
            "out of bounds access to object start array");
-    jbyte* result = &_offset_base[uintptr_t(p) >> _card_shift];
-    assert(_blocks_region.contains(result),
-           "out of bounds result in byte_for");
+    uint8_t* result = &_offset_base[uintptr_t(p) >> BOTConstants::log_card_size()];
     return result;
   }
 
   // Mapping from object start array entry to address of first word
-  HeapWord* addr_for_block(jbyte* p) {
-    assert(_blocks_region.contains(p),
-           "out of bounds access to object start array");
-    size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
-    HeapWord* result = (HeapWord*) (delta << _card_shift);
+  HeapWord* addr_for_entry(const uint8_t* const p) const {
+    size_t delta = pointer_delta(p, _offset_base, sizeof(uint8_t));
+    HeapWord* result = (HeapWord*) (delta << BOTConstants::log_card_size());
     assert(_covered_region.contains(result),
            "out of bounds accessor from card marking array");
     return result;
   }
 
-  // Mapping that includes the derived offset.
-  // If the block is clean, returns the last address in the covered region.
-  // If the block is < index 0, returns the start of the covered region.
-  HeapWord* offset_addr_for_block(jbyte* p) const {
-    // We have to do this before the assert
-    if (p < _raw_base) {
-      return _covered_region.start();
-    }
-
-    assert(_blocks_region.contains(p),
-           "out of bounds access to object start array");
-
-    if (*p == clean_block) {
-      return _covered_region.end();
-    }
-
-    size_t delta = pointer_delta(p, _offset_base, sizeof(jbyte));
-    HeapWord* result = (HeapWord*) (delta << _card_shift);
-    result += *p;
-
-    assert(_covered_region.contains(result),
-           "out of bounds accessor from card marking array");
-
-    return result;
-  }
+  static HeapWord* align_up_by_card_size(HeapWord* const addr) {
+    return align_up(addr, BOTConstants::card_size());
+  }
+
+  void update_for_block_work(HeapWord* blk_start, HeapWord* blk_end);
+
+  void verify_for_block(HeapWord* blk_start, HeapWord* blk_end) const;
 
 public:
-
   // This method is in lieu of a constructor, so that this class can be
   // embedded inline in other classes.
   void initialize(MemRegion reserved_region);
 
+  // Heap old-gen resizing
   void set_covered_region(MemRegion mr);
 
-  void reset();
-
-  MemRegion covered_region() { return _covered_region; }
-
-#define assert_covered_region_contains(addr) \
-        assert(_covered_region.contains(addr), \
-               #addr " (" PTR_FORMAT ") is not in covered region [" PTR_FORMAT ", " PTR_FORMAT "]", \
-               p2i(addr), p2i(_covered_region.start()), p2i(_covered_region.end()))
-
-  void allocate_block(HeapWord* p) {
-    assert_covered_region_contains(p);
-    jbyte* block = block_for_addr(p);
-    HeapWord* block_base = addr_for_block(block);
-    size_t offset = pointer_delta(p, block_base, sizeof(HeapWord*));
-    assert(offset < 128, "Sanity");
-    // When doing MT offsets, we can't assert this.
-    //assert(offset > *block, "Found backwards allocation");
-    *block = (jbyte)offset;
+  static bool is_crossing_card_boundary(HeapWord* const blk_start,
+                                        HeapWord* const blk_end) {
+    HeapWord* cur_card_boundary = align_up_by_card_size(blk_start);
+    // Strictly greater-than, since we check if this block *crosses* card boundary.
+    return blk_end > cur_card_boundary;
   }
 
-  // Optimized for finding the first object that crosses into
-  // a given block. The blocks contain the offset of the last
-  // object in that block. Scroll backwards by one, and the first
-  // object hit should be at the beginning of the block
-  inline HeapWord* object_start(HeapWord* addr) const;
+  // Returns the address of the start of the block reaching into the card containing
+  // "addr".
+  inline HeapWord* block_start_reaching_into_card(HeapWord* const addr) const;
 
-  bool is_block_allocated(HeapWord* addr) {
-    assert_covered_region_contains(addr);
-    jbyte* block = block_for_addr(addr);
-    return *block != clean_block;
+  // [blk_start, blk_end) representing a block of memory in the heap.
+  void update_for_block(HeapWord* blk_start, HeapWord* blk_end) {
+    if (is_crossing_card_boundary(blk_start, blk_end)) {
+      update_for_block_work(blk_start, blk_end);
+    }
  }
 
-  // Return true iff an object starts in
-  // [start_addr, end_addr_aligned_up)
-  // where
-  //   end_addr_aligned_up = align_up(end_addr, _card_size)
-  // Precondition: start_addr is card-size aligned
-  bool object_starts_in_range(HeapWord* start_addr, HeapWord* end_addr) const;
+  inline HeapWord* object_start(HeapWord* const addr) const;
 };
 
 #endif // SHARE_GC_PARALLEL_OBJECTSTARTARRAY_HPP
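
Note: the bodies of block_start_reaching_into_card() and object_start() live in objectStartArray.inline.hpp, which this commit does not display. Given the entry encoding that update_for_block_work() writes, a plausible reconstruction of the lookup walk (assumed code, not part of this diff) is:

// Assumed reconstruction; the real bodies are in objectStartArray.inline.hpp.
inline HeapWord* ObjectStartArray::block_start_reaching_into_card(HeapWord* const addr) const {
  const uint8_t* entry = entry_for_addr(addr);
  // Follow backskip entries until reaching a card that stores a direct offset.
  while (*entry >= BOTConstants::card_size_in_words()) {
    entry -= BOTConstants::entry_to_cards_back(*entry);
  }
  // The direct offset counts words from the card boundary back to the block start.
  return addr_for_entry(entry) - *entry;
}

inline HeapWord* ObjectStartArray::object_start(HeapWord* const addr) const {
  // Start at the block reaching into addr's card, then walk forward object by
  // object until the next object would start past addr.
  HeapWord* cur = block_start_reaching_into_card(addr);
  while (true) {
    HeapWord* next = cur + cast_to_oop(cur)->size();
    if (next > addr) {
      return cur;
    }
    cur = next;
  }
}

The forward walk presumably relies on the old generation staying parsable, i.e. every recorded block starting with an object whose size() can be read.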