Skip to content

Commit 2ad3ce1

Browse files
committed
8376135: [lworld] Add JIT support for NULLABLE_NON_ATOMIC_FLAT layout
Reviewed-by: thartmann, fparain
1 parent 55c80fe commit 2ad3ce1

22 files changed

+234
-118
lines changed

src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp

Lines changed: 9 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -7261,17 +7261,17 @@ bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, in
72617261
b(L_notNull);
72627262
bind(L_null);
72637263
// Set null marker to zero to signal that the argument is null.
7264-
// Also set all oop fields to zero to make the GC happy.
7264+
// Also set all fields to zero since the runtime requires a canonical
7265+
// representation of a flat null.
72657266
stream.reset(sig_index, to_index);
72667267
while (stream.next(toReg, bt)) {
7267-
if (sig->at(stream.sig_index())._offset == -1 ||
7268-
bt == T_OBJECT || bt == T_ARRAY) {
7269-
if (toReg->is_stack()) {
7270-
int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7271-
str(zr, Address(sp, st_off));
7272-
} else {
7273-
mov(toReg->as_Register(), zr);
7274-
}
7268+
if (toReg->is_stack()) {
7269+
int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size;
7270+
str(zr, Address(sp, st_off));
7271+
} else if (toReg->is_FloatRegister()) {
7272+
mov(toReg->as_FloatRegister(), T2S, 0);
7273+
} else {
7274+
mov(toReg->as_Register(), zr);
72757275
}
72767276
}
72777277
bind(L_notNull);

src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

Lines changed: 9 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -3055,7 +3055,7 @@ BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(con
30553055
j++;
30563056
}
30573057
assert(j == regs->length(), "missed a field?");
3058-
if (vk->has_nullable_atomic_layout()) {
3058+
if (vk->supports_nullable_layouts()) {
30593059
// Zero the null marker (setting it to 1 would be better but would require an additional register)
30603060
__ strb(zr, Address(r0, vk->null_marker_offset()));
30613061
}
@@ -3067,7 +3067,8 @@ BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(con
30673067
Label not_null;
30683068
__ cbnz(r0, not_null);
30693069

3070-
// Return value is null. Zero oop registers to make the GC happy.
3070+
// Return value is null. Zero all registers because the runtime requires a canonical
3071+
// representation of a flat null.
30713072
j = 1;
30723073
for (int i = 0; i < sig_vk->length(); i++) {
30733074
BasicType bt = sig_vk->at(i)._bt;
@@ -3081,9 +3082,12 @@ BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(con
30813082
}
30823083
continue;
30833084
}
3084-
if (bt == T_OBJECT || bt == T_ARRAY) {
3085-
VMRegPair pair = regs->at(j);
3086-
VMReg r_1 = pair.first();
3085+
3086+
VMRegPair pair = regs->at(j);
3087+
VMReg r_1 = pair.first();
3088+
if (r_1->is_FloatRegister()) {
3089+
__ mov(r_1->as_FloatRegister(), Assembler::T2S, 0);
3090+
} else {
30873091
__ mov(r_1->as_Register(), zr);
30883092
}
30893093
j++;

src/hotspot/cpu/x86/macroAssembler_x86.cpp

Lines changed: 9 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -6380,17 +6380,17 @@ bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, in
63806380
jmp(L_notNull);
63816381
bind(L_null);
63826382
// Set null marker to zero to signal that the argument is null.
6383-
// Also set all oop fields to zero to make the GC happy.
6383+
// Also set all fields to zero since the runtime requires a canonical
6384+
// representation of a flat null.
63846385
stream.reset(sig_index, to_index);
63856386
while (stream.next(toReg, bt)) {
6386-
if (sig->at(stream.sig_index())._offset == -1 ||
6387-
bt == T_OBJECT || bt == T_ARRAY) {
6388-
if (toReg->is_stack()) {
6389-
int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6390-
movq(Address(rsp, st_off), 0);
6391-
} else {
6392-
xorq(toReg->as_Register(), toReg->as_Register());
6393-
}
6387+
if (toReg->is_stack()) {
6388+
int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize;
6389+
movq(Address(rsp, st_off), 0);
6390+
} else if (toReg->is_XMMRegister()) {
6391+
xorps(toReg->as_XMMRegister(), toReg->as_XMMRegister());
6392+
} else {
6393+
xorl(toReg->as_Register(), toReg->as_Register());
63946394
}
63956395
}
63966396
bind(L_notNull);

src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Lines changed: 10 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -3848,7 +3848,7 @@ BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(con
38483848
j++;
38493849
}
38503850
assert(j == regs->length(), "missed a field?");
3851-
if (vk->has_nullable_atomic_layout()) {
3851+
if (vk->supports_nullable_layouts()) {
38523852
// Set the null marker
38533853
__ movb(Address(rax, vk->null_marker_offset()), 1);
38543854
}
@@ -3861,7 +3861,8 @@ BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(con
38613861
__ testptr(rax, rax);
38623862
__ jcc(Assembler::notZero, not_null);
38633863

3864-
// Return value is null. Zero oop registers to make the GC happy.
3864+
// Return value is null. Zero all registers because the runtime requires a canonical
3865+
// representation of a flat null.
38653866
j = 1;
38663867
for (int i = 0; i < sig_vk->length(); i++) {
38673868
BasicType bt = sig_vk->at(i)._bt;
@@ -3875,10 +3876,13 @@ BufferedInlineTypeBlob* SharedRuntime::generate_buffered_inline_type_adapter(con
38753876
}
38763877
continue;
38773878
}
3878-
if (bt == T_OBJECT || bt == T_ARRAY) {
3879-
VMRegPair pair = regs->at(j);
3880-
VMReg r_1 = pair.first();
3881-
__ xorq(r_1->as_Register(), r_1->as_Register());
3879+
3880+
VMRegPair pair = regs->at(j);
3881+
VMReg r_1 = pair.first();
3882+
if (r_1->is_XMMRegister()) {
3883+
__ xorps(r_1->as_XMMRegister(), r_1->as_XMMRegister());
3884+
} else {
3885+
__ xorl(r_1->as_Register(), r_1->as_Register());
38823886
}
38833887
j++;
38843888
}

src/hotspot/share/c1/c1_Canonicalizer.cpp

Lines changed: 3 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -468,11 +468,8 @@ void Canonicalizer::do_CompareOp (CompareOp* x) {
468468

469469

470470
void Canonicalizer::do_IfOp(IfOp* x) {
471-
// Currently, Canonicalizer is only used by GraphBuilder,
472-
// and IfOp is not created by GraphBuilder but only later
473-
// when eliminating conditional expressions with CE_Eliminator,
474-
// so this method will not be called.
475-
ShouldNotReachHere();
471+
// Currently, Canonicalizer is only used by GraphBuilder, and IfOp is only created by
472+
// GraphBuilder when loading/storing flat fields, do nothing for now.
476473
}
477474

478475

src/hotspot/share/c1/c1_GraphBuilder.cpp

Lines changed: 78 additions & 44 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,9 @@
2626
#include "c1/c1_CFGPrinter.hpp"
2727
#include "c1/c1_Compilation.hpp"
2828
#include "c1/c1_GraphBuilder.hpp"
29+
#include "c1/c1_Instruction.hpp"
2930
#include "c1/c1_InstructionPrinter.hpp"
31+
#include "c1/c1_ValueType.hpp"
3032
#include "ci/ciCallSite.hpp"
3133
#include "ci/ciField.hpp"
3234
#include "ci/ciFlatArrayKlass.hpp"
@@ -1983,28 +1985,28 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
19831985
// Flat field
19841986
assert(!needs_patching, "Can't patch flat inline type field access");
19851987
ciInlineKlass* inline_klass = field->type()->as_inline_klass();
1986-
bool is_naturally_atomic = inline_klass->nof_declared_nonstatic_fields() <= 1;
1987-
bool needs_atomic_access = !field->is_null_free() || (field->is_volatile() && !is_naturally_atomic);
1988-
if (needs_atomic_access) {
1988+
if (field->is_atomic()) {
19891989
assert(!has_pending_field_access(), "Pending field accesses are not supported");
19901990
LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching);
19911991
push(type, append(load));
19921992
} else {
1993-
assert(field->is_null_free(), "must be null-free");
19941993
// Look at the next bytecode to check if we can delay the field access
19951994
bool can_delay_access = false;
1996-
ciBytecodeStream s(method());
1997-
s.force_bci(bci());
1998-
s.next();
1999-
if (s.cur_bc() == Bytecodes::_getfield && !needs_patching) {
2000-
ciField* next_field = s.get_field(will_link);
2001-
bool next_needs_patching = !next_field->holder()->is_loaded() ||
2002-
!next_field->will_link(method(), Bytecodes::_getfield) ||
2003-
PatchALot;
2004-
// We can't update the offset for atomic accesses
2005-
bool next_needs_atomic_access = !next_field->is_null_free() || next_field->is_volatile();
2006-
can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching && !next_needs_atomic_access;
1995+
if (field->is_null_free()) {
1996+
ciBytecodeStream s(method());
1997+
s.force_bci(bci());
1998+
s.next();
1999+
if (s.cur_bc() == Bytecodes::_getfield && !needs_patching) {
2000+
ciField* next_field = s.get_field(will_link);
2001+
bool next_needs_patching = !next_field->holder()->is_loaded() ||
2002+
!next_field->will_link(method(), Bytecodes::_getfield) ||
2003+
PatchALot;
2004+
// We can't update the offset for atomic accesses
2005+
bool next_needs_atomic_access = next_field->is_flat() && next_field->is_atomic();
2006+
can_delay_access = C1UseDelayedFlattenedFieldReads && !next_needs_patching && !next_needs_atomic_access && next_field->is_null_free();
2007+
}
20072008
}
2009+
20082010
if (can_delay_access) {
20092011
if (has_pending_load_indexed()) {
20102012
pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
@@ -2018,8 +2020,8 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
20182020
} else {
20192021
scope()->set_wrote_final();
20202022
scope()->set_wrote_fields();
2021-
bool need_membar = false;
20222023
if (has_pending_load_indexed()) {
2024+
assert(field->is_null_free(), "nullable fields do not support delayed accesses yet");
20232025
assert(!needs_patching, "Can't patch delayed field access");
20242026
pending_load_indexed()->update(field, offset - field->holder()->as_inline_klass()->payload_offset());
20252027
NewInstance* vt = new NewInstance(inline_klass, pending_load_indexed()->state_before(), false, true);
@@ -2028,34 +2030,49 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
20282030
apush(append_split(vt));
20292031
append(pending_load_indexed()->load_instr());
20302032
set_pending_load_indexed(nullptr);
2031-
need_membar = true;
2033+
} else if (has_pending_field_access()) {
2034+
assert(field->is_null_free(), "nullable fields do not support delayed accesses yet");
2035+
state_before = pending_field_access()->state_before();
2036+
NewInstance* new_instance = new NewInstance(inline_klass, state_before, false, true);
2037+
_memory->new_instance(new_instance);
2038+
apush(append_split(new_instance));
2039+
copy_inline_content(inline_klass, pending_field_access()->obj(),
2040+
pending_field_access()->offset() + field->offset_in_bytes() - field->holder()->as_inline_klass()->payload_offset(),
2041+
new_instance, inline_klass->payload_offset(), state_before);
2042+
set_pending_field_access(nullptr);
20322043
} else {
2033-
if (has_pending_field_access()) {
2034-
state_before = pending_field_access()->state_before();
2044+
if (!field->is_null_free() && !inline_klass->is_initialized()) {
2045+
// Cannot allocate an instance of inline_klass because it may have not been
2046+
// initialized, bailout for now
2047+
bailout("load from an uninitialized nullable non-atomic flat field");
2048+
return;
20352049
}
2050+
20362051
NewInstance* new_instance = new NewInstance(inline_klass, state_before, false, true);
20372052
_memory->new_instance(new_instance);
2038-
apush(append_split(new_instance));
2039-
if (has_pending_field_access()) {
2040-
copy_inline_content(inline_klass, pending_field_access()->obj(),
2041-
pending_field_access()->offset() + field->offset_in_bytes() - field->holder()->as_inline_klass()->payload_offset(),
2042-
new_instance, inline_klass->payload_offset(), state_before);
2043-
set_pending_field_access(nullptr);
2044-
} else {
2045-
if (field->type()->as_instance_klass()->is_initialized() && field->type()->as_inline_klass()->is_empty()) {
2046-
// Needs an explicit null check because below code does not perform any actual load if there are no fields
2047-
null_check(obj);
2048-
}
2049-
copy_inline_content(inline_klass, obj, field->offset_in_bytes(), new_instance, inline_klass->payload_offset(), state_before);
2053+
append_split(new_instance);
2054+
2055+
if (inline_klass->is_initialized() && inline_klass->is_empty()) {
2056+
// Needs an explicit null check because below code does not perform any actual load if there are no fields
2057+
null_check(obj);
20502058
}
2051-
need_membar = true;
2052-
}
2053-
if (need_membar) {
2054-
// If we allocated a new instance ensure the stores to copy the
2055-
// field contents are visible before any subsequent store that
2056-
// publishes this reference.
2057-
append(new MemBar(lir_membar_storestore));
2059+
copy_inline_content(inline_klass, obj, field->offset_in_bytes(), new_instance, inline_klass->payload_offset(), state_before);
2060+
2061+
Instruction* result = new_instance;
2062+
if (!field->is_null_free()) {
2063+
Value int_zero = append(new Constant(intZero));
2064+
Value object_null = append(new Constant(objectNull));
2065+
Value nm_offset = append(new Constant(new LongConstant(offset + inline_klass->null_marker_offset_in_payload())));
2066+
Value nm = append(new UnsafeGet(T_BOOLEAN, obj, nm_offset, false));
2067+
result = append(new IfOp(nm, Instruction::neq, int_zero, new_instance, object_null, state_before, false));
2068+
}
2069+
apush(result);
20582070
}
2071+
2072+
// If we allocated a new instance ensure the stores to copy the
2073+
// field contents are visible before any subsequent store that
2074+
// publishes this reference.
2075+
append(new MemBar(lir_membar_storestore));
20592076
}
20602077
}
20612078
}
@@ -2090,16 +2107,33 @@ void GraphBuilder::access_field(Bytecodes::Code code) {
20902107
// Flat field
20912108
assert(!needs_patching, "Can't patch flat inline type field access");
20922109
ciInlineKlass* inline_klass = field->type()->as_inline_klass();
2093-
bool is_naturally_atomic = inline_klass->nof_declared_nonstatic_fields() <= 1;
2094-
bool needs_atomic_access = !field->is_null_free() || (field->is_volatile() && !is_naturally_atomic);
2095-
if (needs_atomic_access) {
2110+
if (field->is_atomic()) {
20962111
if (field->is_null_free()) {
20972112
null_check(val);
20982113
}
20992114
append(new StoreField(obj, offset, field, val, false, state_before, needs_patching));
2100-
} else {
2101-
assert(field->is_null_free(), "must be null-free");
2115+
} else if (field->is_null_free()) {
2116+
assert(!inline_klass->is_empty(), "should have been handled");
21022117
copy_inline_content(inline_klass, val, inline_klass->payload_offset(), obj, offset, state_before, field);
2118+
} else {
2119+
if (!inline_klass->is_initialized()) {
2120+
// null_reset_value is not available, bailout for now
2121+
bailout("store to an uninitialized nullable non-atomic flat field");
2122+
return;
2123+
}
2124+
2125+
// Store the subfields when field is a nullable non-atomic field
2126+
Value object_null = append(new Constant(objectNull));
2127+
Value null_reset_value = append(new Constant(new ObjectConstant(inline_klass->get_null_reset_value().as_object())));
2128+
Value src = append(new IfOp(val, Instruction::neq, object_null, val, null_reset_value, state_before, false));
2129+
copy_inline_content(inline_klass, src, inline_klass->payload_offset(), obj, offset, state_before);
2130+
2131+
// Store the null marker
2132+
Value int_one = append(new Constant(new IntConstant(1)));
2133+
Value int_zero = append(new Constant(intZero));
2134+
Value nm = append(new IfOp(val, Instruction::neq, object_null, int_one, int_zero, state_before, false));
2135+
Value nm_offset = append(new Constant(new LongConstant(offset + inline_klass->null_marker_offset_in_payload())));
2136+
append(new UnsafePut(T_BOOLEAN, obj, nm_offset, nm, false));
21032137
}
21042138
}
21052139
break;

src/hotspot/share/c1/c1_LIRGenerator.cpp

Lines changed: 3 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -1650,9 +1650,7 @@ void LIRGenerator::do_StoreField(StoreField* x) {
16501650
ciInlineKlass* vk = field->type()->as_inline_klass();
16511651

16521652
#ifdef ASSERT
1653-
bool is_naturally_atomic = vk->nof_declared_nonstatic_fields() <= 1;
1654-
bool needs_atomic_access = !field->is_null_free() || (field->is_volatile() && !is_naturally_atomic);
1655-
assert(needs_atomic_access, "No atomic access required");
1653+
assert(field->is_atomic(), "No atomic access required %s.%s", field->holder()->name()->as_utf8(), field->name()->as_utf8());
16561654
// ZGC does not support compressed oops, so only one oop can be in the payload which is written by a "normal" oop store.
16571655
assert(!vk->contains_oops() || !UseZGC, "ZGC does not support embedded oops in flat fields");
16581656
#endif
@@ -2098,9 +2096,7 @@ void LIRGenerator::do_LoadField(LoadField* x) {
20982096
if (field->is_flat()) {
20992097
ciInlineKlass* vk = field->type()->as_inline_klass();
21002098
#ifdef ASSERT
2101-
bool is_naturally_atomic = vk->nof_declared_nonstatic_fields() <= 1;
2102-
bool needs_atomic_access = !field->is_null_free() || (field->is_volatile() && !is_naturally_atomic);
2103-
assert(needs_atomic_access, "No atomic access required");
2099+
assert(field->is_atomic(), "No atomic access required");
21042100
assert(x->state_before() != nullptr, "Needs state before");
21052101
#endif
21062102

0 commit comments

Comments
 (0)