 
 GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;
 
-// The following are offsets from buffer_bottom()
+bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
 size_t AOTMappedHeapWriter::_buffer_used;
 
 // Heap root segments
@@ -74,7 +74,7 @@ AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
 DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
 
 typedef HashTable<
-  size_t,    // offset of a filler from ArchiveHeapWriter::buffer_bottom()
+  size_t,    // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
   size_t,    // size of this filler (in bytes)
   127,       // prime number
   AnyObj::C_HEAP,
@@ -96,6 +96,45 @@ void AOTMappedHeapWriter::init() {
     _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
 
     guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
+
+    if (CDSConfig::old_cds_flags_used()) {
+      // With the old CDS workflow, we can guarantee deterministic output: given
+      // the same classlist file, we can generate the same static CDS archive.
+      // To ensure determinism, we always use the same compressed oop encoding
+      // (zero-based, no shift). See set_requested_address_range().
+      _is_writing_deterministic_heap = true;
+    } else {
+      // Deterministic output is not supported by the new AOT workflow, so
+      // we don't force the (zero-based, no shift) encoding. This way, it is more
+      // likely that we can avoid oop relocation in the production run.
+      _is_writing_deterministic_heap = false;
+    }
+  }
+}
+
+// For AOTMappedHeapWriter::narrow_oop_{mode, base, shift}(), see the comments
+// in AOTMappedHeapWriter::set_requested_address_range().
+CompressedOops::Mode AOTMappedHeapWriter::narrow_oop_mode() {
+  if (is_writing_deterministic_heap()) {
+    return CompressedOops::UnscaledNarrowOop;
+  } else {
+    return CompressedOops::mode();
+  }
+}
+
+address AOTMappedHeapWriter::narrow_oop_base() {
+  if (is_writing_deterministic_heap()) {
+    return (address)0;
+  } else {
+    return CompressedOops::base();
+  }
+}
+
+int AOTMappedHeapWriter::narrow_oop_shift() {
+  if (is_writing_deterministic_heap()) {
+    return 0;
+  } else {
+    return CompressedOops::shift();
   }
 }
 
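The three accessors above describe the narrow-oop encoding that the writer requests for the archived heap. As a quick illustration (not part of the patch), here is a minimal sketch of the standard compressed-oops formula using those values; the function and variable names are mine, and the address is a made-up example.

#include <cassert>
#include <cstdint>

// Standard compressed-oops encoding: narrowOop = (addr - base) >> shift.
static uint32_t encode_requested_oop(uint64_t requested_addr, uint64_t base, int shift) {
  return (uint32_t)((requested_addr - base) >> shift);
}

int main() {
  // In the deterministic case, narrow_oop_base() == 0 and narrow_oop_shift() == 0,
  // so the encoded value is simply the low 32 bits of the requested address.
  const uint64_t requested_addr = 0xffb01238ULL;  // hypothetical requested oop address
  assert(encode_requested_oop(requested_addr, 0, 0) == (uint32_t)requested_addr);
  return 0;
}

In the non-deterministic case the same formula applies, just with the base and shift that the assembly-phase JVM happens to be using (CompressedOops::base()/shift()).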
@@ -116,7 +155,7 @@ void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
   assert(CDSConfig::is_dumping_heap(), "sanity");
   allocate_buffer();
   copy_source_objs_to_buffer(roots);
-  set_requested_address(heap_info);
+  set_requested_address_range(heap_info);
   relocate_embedded_oops(roots, heap_info);
 }
 
@@ -536,14 +575,55 @@ size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
   return buffered_obj_offset;
 }
 
-void AOTMappedHeapWriter::set_requested_address(ArchiveMappedHeapInfo* info) {
+// Set the range [_requested_bottom, _requested_top), the requested address range of all
+// the archived heap objects in the production run.
+//
+// (1) UseCompressedOops == true && !is_writing_deterministic_heap()
+//
+// The archived objects are stored using the COOPS encoding of the assembly phase.
+// We pick a range within the heap used by the assembly phase.
+//
+// In the production run, if a different COOPS encoding is used, the heap contents
+// need to be relocated.
+//
+// (2) UseCompressedOops == true && is_writing_deterministic_heap()
+//
+// We always use the zero-based, zero-shift encoding, and place the range so that
+// _requested_top is at or below 0x100000000.
+//
+// (3) UseCompressedOops == false
+//
+// In the production run, the heap range is usually picked (randomly) by the OS, so we
+// will almost always need to perform relocation, regardless of how we pick the requested
+// address range.
+//
+// So we just hard-code _requested_bottom to NOCOOPS_REQUESTED_BASE.
+//
+void AOTMappedHeapWriter::set_requested_address_range(ArchiveMappedHeapInfo* info) {
   assert(!info->is_used(), "only set once");
 
   size_t heap_region_byte_size = _buffer_used;
   assert(heap_region_byte_size > 0, "must archived at least one object!");
 
   if (UseCompressedOops) {
-    if (UseG1GC) {
+    if (is_writing_deterministic_heap()) {
+      // Pick a heap range so that requested addresses can be encoded with zero-base/no shift.
+      // We align the requested bottom to at least 1 MB: if the production run uses G1 with a small
+      // heap (e.g., -Xmx256m), it's likely that we can map the archived objects at the
+      // requested location to avoid relocation.
+      //
+      // For other collectors or larger heaps, relocation is unavoidable, but is usually
+      // quite cheap. If you really want to avoid relocation, use the AOT workflow instead.
+      address heap_end = (address)0x100000000;
+      size_t alignment = MAX2(MIN_GC_REGION_ALIGNMENT, 1024 * 1024);
+      if (align_up(heap_region_byte_size, alignment) >= (size_t)heap_end) {
+        log_error(aot, heap)("cached heap space is too large: %zu bytes", heap_region_byte_size);
+        AOTMetaspace::unrecoverable_writing_error();
+      }
+      _requested_bottom = align_down(heap_end - heap_region_byte_size, alignment);
+    } else if (UseG1GC) {
+      // For G1, pick the range at the top of the current heap. If the exact same heap sizes
+      // are used in the production run, it's likely that we can map the archived objects
+      // at the requested location to avoid relocation.
       address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
       log_info(aot, heap)("Heap end = %p", heap_end);
       _requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
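As a sanity check of the deterministic branch above, here is a standalone sketch (not part of the patch) of the same arithmetic. The align_down/align_up helpers are re-implemented locally and the 5 MB buffer size is a made-up example; the real code uses HotSpot's own utilities and _buffer_used.

#include <cassert>
#include <cstdint>

static uint64_t align_down_u64(uint64_t v, uint64_t a) { return v & ~(a - 1); }
static uint64_t align_up_u64(uint64_t v, uint64_t a)   { return (v + a - 1) & ~(a - 1); }

int main() {
  const uint64_t heap_end  = 0x100000000ULL;   // 4 GB, as in the branch above
  const uint64_t alignment = 1024 * 1024;      // assuming MIN_GC_REGION_ALIGNMENT <= 1 MB
  const uint64_t buffer    = 5 * 1024 * 1024;  // hypothetical _buffer_used (5 MB)

  // The dump bails out when the aligned buffer cannot fit below 4 GB.
  assert(align_up_u64(buffer, alignment) < heap_end);

  const uint64_t requested_bottom = align_down_u64(heap_end - buffer, alignment);
  const uint64_t requested_top    = requested_bottom + buffer;

  // Here: requested_bottom == 0xffb00000 and requested_top == 0x100000000.
  // Every address in the range fits in 32 bits, so it can be stored as an
  // unscaled (zero-base, zero-shift) narrowOop.
  assert(requested_bottom == 0xffb00000ULL && requested_top <= heap_end);
  return 0;
}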
@@ -612,7 +692,14 @@ oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
 
 template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
   oop request_referent = source_obj_to_requested_obj(source_referent);
-  store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
+  if (UseCompressedOops && is_writing_deterministic_heap()) {
+    // We use zero-based, 0-shift encoding, so the narrowOop is just the lower
+    // 32 bits of request_referent.
+    intptr_t addr = cast_from_oop<intptr_t>(request_referent);
+    *((narrowOop*)field_addr_in_buffer) = checked_cast<narrowOop>(addr);
+  } else {
+    store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
+  }
   if (request_referent != nullptr) {
     mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
   }
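To show why storing the low 32 bits is enough in the deterministic case, here is an illustrative round trip (not part of the patch): with base 0 and shift 0, decoding in the production run is plain zero-extension, which recovers the requested address exactly because the requested range lies below 0x100000000. The address value is hypothetical.

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t requested_addr = 0xffb01238ULL;     // some object inside [_requested_bottom, _requested_top)
  const uint32_t narrow  = (uint32_t)requested_addr; // what the deterministic branch stores
  const uint64_t decoded = (uint64_t)narrow;         // production-run decode with base == 0, shift == 0
  assert(decoded == requested_addr);                 // holds because requested_top <= 0x100000000
  return 0;
}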
@@ -918,9 +1005,9 @@ AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHe
   address buffer_start = address(r.start());
   address buffer_end = address(r.end());
 
-  address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
-  address requested_start = UseCompressedOops ? buffered_addr_to_requested_addr(buffer_start) : requested_base;
-  int requested_shift = CompressedOops::shift();
+  address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
+  address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
+  int requested_shift = AOTMappedHeapWriter::narrow_oop_shift();
   intptr_t buffer_to_requested_delta = requested_start - buffer_start;
   uint64_t buffer_start_narrow_oop = 0xdeadbeed;
   if (UseCompressedOops) {