mirror of https://github.com/openjdk/jdk.git
commit 84901ac040
Merge branch 'master' into JDK-8372526
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
  * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -722,14 +722,13 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
   Label L_skip_barrier;
 
-  { // Bypass the barrier for non-static methods
+  // Bypass the barrier for non-static methods
   __ ldrh(rscratch1, Address(rmethod, Method::access_flags_offset()));
   __ andsw(zr, rscratch1, JVM_ACC_STATIC);
   __ br(Assembler::EQ, L_skip_barrier); // non-static
-  }
 
   __ load_method_holder(rscratch2, rmethod);
   __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
@@ -737,7 +736,6 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   __ bind(L_skip_barrier);
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
 
   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
   bs->c2i_entry_barrier(masm);
@@ -1508,7 +1506,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   // SVC, HVC, or SMC. Make it a NOP.
   __ nop();
 
-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
     __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
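
Note on the pattern above: the change assumes fast class-init checks are always available on this path, so the runtime "if" becomes a debug-only assert and the braces that scoped the non-static bypass are dropped. A rough standalone C++ sketch of the decision the generated stub encodes (names here are illustrative, not the HotSpot API):

    // Illustrative model only: is_static stands for the JVM_ACC_STATIC test,
    // holder_initialized / reentrant_init for the InstanceKlass init-state
    // checks that clinit_barrier performs.
    bool c2i_must_take_slow_clinit_path(bool is_static,
                                        bool holder_initialized,
                                        bool reentrant_init) {
      if (!is_static) return false;         // bypass the barrier for non-static methods
      if (holder_initialized) return false; // jump to L_skip_barrier
      return !reentrant_init;               // otherwise take the slow path
    }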
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -2290,7 +2290,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
   __ subs(zr, temp, (int) code); // have we resolved this bytecode?
 
   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     __ br(Assembler::NE, L_clinit_barrier_slow);
     __ ldr(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
     __ load_method_holder(temp, temp);
@@ -2340,8 +2341,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
   __ subs(zr, temp, (int) code); // have we resolved this bytecode?
 
   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = temp;
 
     __ br(Assembler::NE, L_clinit_barrier_slow);
@@ -1109,11 +1109,11 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
   lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
   ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
   addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
-  add(R11_scratch1, R12_scratch2, R12_scratch2);
+  add(R11_scratch1, R11_scratch1, R12_scratch2);
   cmpd(CR0, R11_scratch1, R14_bcp);
   beq(CR0, verify_continue);
 
-  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
+  call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), R19_method, R14_bcp, R28_mdx);
 
   bind(verify_continue);
 #endif
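
The PPC interpreter hunk above fixes the expected-bcp computation in verify_method_data_pointer(): the intended address is ConstMethod* (R12_scratch2) plus codes_offset plus bci (accumulated in R11_scratch1), but the old instruction added the ConstMethod* to itself and lost the offset. A plain-C++ model of the arithmetic, with registers as integers (illustrative only):

    #include <cstdint>

    // r11 = bci + in_bytes(ConstMethod::codes_offset()), r12 = ConstMethod*.
    uintptr_t expected_bcp(uintptr_t r11, uintptr_t r12) {
      // old: add(R11, R12, R12) computed r12 + r12 (wrong base, offset lost)
      // new: add(R11, R11, R12) computes r11 + r12 (offset plus base)
      return r11 + r12;
    }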
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1237,14 +1237,13 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
   Label L_skip_barrier;
 
-  { // Bypass the barrier for non-static methods
+  // Bypass the barrier for non-static methods
   __ lhz(R0, in_bytes(Method::access_flags_offset()), R19_method);
   __ andi_(R0, R0, JVM_ACC_STATIC);
   __ beq(CR0, L_skip_barrier); // non-static
-  }
 
   Register klass = R11_scratch1;
   __ load_method_holder(klass, R19_method);
@@ -1256,7 +1255,6 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   __ bind(L_skip_barrier);
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
 
   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
   bs->c2i_entry_barrier(masm, /* tmp register*/ ic_klass, /* tmp register*/ receiver_klass, /* tmp register*/ code);
@@ -2210,7 +2208,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   // --------------------------------------------------------------------------
   vep_start_pc = (intptr_t)__ pc();
 
-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     Register klass = r_temp_1;
     // Notify OOP recorder (don't need the relocation)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2013, 2025 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -2199,7 +2199,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no, Register Rca
   __ isync(); // Order load wrt. succeeding loads.
 
   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register method = Rscratch;
     const Register klass = Rscratch;
 
@@ -2244,8 +2245,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no, Register Rcac
   __ isync(); // Order load wrt. succeeding loads.
 
   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = R4_ARG2;
 
     // InterpreterRuntime::resolve_get_put sets field_holder and finally release-stores put_code.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -637,14 +637,13 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
   Label L_skip_barrier;
 
-  { // Bypass the barrier for non-static methods
+  // Bypass the barrier for non-static methods
   __ load_unsigned_short(t0, Address(xmethod, Method::access_flags_offset()));
   __ test_bit(t1, t0, exact_log2(JVM_ACC_STATIC));
   __ beqz(t1, L_skip_barrier); // non-static
-  }
 
   __ load_method_holder(t1, xmethod);
   __ clinit_barrier(t1, t0, &L_skip_barrier);
@@ -652,7 +651,6 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   __ bind(L_skip_barrier);
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
 
   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
   bs->c2i_entry_barrier(masm);
@@ -1443,7 +1441,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ nop(); // 4 bytes
   }
 
-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     __ mov_metadata(t1, method->method_holder()); // InstanceKlass*
     __ clinit_barrier(t1, t0, &L_skip_barrier);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -2192,7 +2192,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
   __ mv(t0, (int) code);
 
   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     __ bne(temp, t0, L_clinit_barrier_slow); // have we resolved this bytecode?
     __ ld(temp, Address(Rcache, in_bytes(ResolvedMethodEntry::method_offset())));
     __ load_method_holder(temp, temp);
@@ -2243,8 +2244,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
   __ mv(t0, (int) code); // have we resolved this bytecode?
 
   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = temp;
 
     __ bne(temp, t0, L_clinit_barrier_slow);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1567,7 +1567,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
   //---------------------------------------------------------------------
   wrapper_VEPStart = __ offset();
 
-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     Register klass = Z_R1_scratch;
     // Notify OOP recorder (don't need the relocation)
@@ -2378,13 +2379,12 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
   Label L_skip_barrier;
 
-  { // Bypass the barrier for non-static methods
+  // Bypass the barrier for non-static methods
   __ testbit_ushort(Address(Z_method, Method::access_flags_offset()), JVM_ACC_STATIC_BIT);
   __ z_bfalse(L_skip_barrier); // non-static
-  }
 
   Register klass = Z_R11;
   __ load_method_holder(klass, Z_method);
@@ -2395,7 +2395,6 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   __ bind(L_skip_barrier);
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
 
   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
   return;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -2377,7 +2377,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
   __ z_cli(Address(Rcache, bc_offset), code);
 
   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register method = Z_R1_scratch;
     const Register klass = Z_R1_scratch;
     __ z_brne(L_clinit_barrier_slow);
@@ -2427,8 +2428,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
   __ z_cli(Address(cache, code_offset), code);
 
   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = index;
 
     __ z_brne(L_clinit_barrier_slow);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1043,16 +1043,15 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   // Class initialization barrier for static methods
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = nullptr;
-  if (VM_Version::supports_fast_class_init_checks()) {
+  assert(VM_Version::supports_fast_class_init_checks(), "sanity");
   Label L_skip_barrier;
   Register method = rbx;
 
-  { // Bypass the barrier for non-static methods
+  // Bypass the barrier for non-static methods
   Register flags = rscratch1;
   __ load_unsigned_short(flags, Address(method, Method::access_flags_offset()));
   __ testl(flags, JVM_ACC_STATIC);
   __ jcc(Assembler::zero, L_skip_barrier); // non-static
-  }
 
   Register klass = rscratch1;
   __ load_method_holder(klass, method);
@@ -1062,7 +1061,6 @@ void SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm,
 
   __ bind(L_skip_barrier);
   entry_address[AdapterBlob::C2I_No_Clinit_Check] = __ pc();
-  }
 
   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
   bs->c2i_entry_barrier(masm);
@@ -1904,7 +1902,8 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
   int vep_offset = ((intptr_t)__ pc()) - start;
 
-  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
+  if (method->needs_clinit_barrier()) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     Label L_skip_barrier;
     Register klass = r10;
     __ mov_metadata(klass, method->method_holder()); // InstanceKlass*
@@ -3602,4 +3601,3 @@ RuntimeStub* SharedRuntime::generate_jfr_return_lease() {
 }
 
 #endif // INCLUDE_JFR
-
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,39 @@ static address kyberAvx512ConstsAddr(int offset) {
 
 const Register scratch = r10;
 
+ATTRIBUTE_ALIGNED(64) static const uint8_t kyberAvx512_12To16Dup[] = {
+    // 0 - 63
+    0, 1, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15, 16,
+    16, 17, 18, 19, 19, 20, 21, 22, 22, 23, 24, 25, 25, 26, 27, 28, 28, 29, 30,
+    31, 31, 32, 33, 34, 34, 35, 36, 37, 37, 38, 39, 40, 40, 41, 42, 43, 43, 44,
+    45, 46, 46, 47
+};
+
+static address kyberAvx512_12To16DupAddr() {
+  return (address) kyberAvx512_12To16Dup;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512_12To16Shift[] = {
+    // 0 - 31
+    0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0, 4, 0,
+    4, 0, 4, 0, 4, 0, 4
+};
+
+static address kyberAvx512_12To16ShiftAddr() {
+  return (address) kyberAvx512_12To16Shift;
+}
+
+ATTRIBUTE_ALIGNED(64) static const uint64_t kyberAvx512_12To16And[] = {
+    // 0 - 7
+    0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+    0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF,
+    0x0FFF0FFF0FFF0FFF, 0x0FFF0FFF0FFF0FFF
+};
+
+static address kyberAvx512_12To16AndAddr() {
+  return (address) kyberAvx512_12To16And;
+}
+
 ATTRIBUTE_ALIGNED(64) static const uint16_t kyberAvx512NttPerms[] = {
     // 0
     0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
@@ -822,10 +855,65 @@ address generate_kyber12To16_avx512(StubGenerator *stubgen,
 
   const Register perms = r11;
 
-  Label Loop;
+  Label Loop, VBMILoop;
 
   __ addptr(condensed, condensedOffs);
 
+  if (VM_Version::supports_avx512_vbmi()) {
+    // mask load for the first 48 bytes of each vector
+    __ mov64(rax, 0x0000FFFFFFFFFFFF);
+    __ kmovql(k1, rax);
+
+    __ lea(perms, ExternalAddress(kyberAvx512_12To16DupAddr()));
+    __ evmovdqub(xmm20, Address(perms), Assembler::AVX_512bit);
+
+    __ lea(perms, ExternalAddress(kyberAvx512_12To16ShiftAddr()));
+    __ evmovdquw(xmm21, Address(perms), Assembler::AVX_512bit);
+
+    __ lea(perms, ExternalAddress(kyberAvx512_12To16AndAddr()));
+    __ evmovdquq(xmm22, Address(perms), Assembler::AVX_512bit);
+
+    __ align(OptoLoopAlignment);
+    __ BIND(VBMILoop);
+
+    __ evmovdqub(xmm0, k1, Address(condensed, 0), false,
+                 Assembler::AVX_512bit);
+    __ evmovdqub(xmm1, k1, Address(condensed, 48), false,
+                 Assembler::AVX_512bit);
+    __ evmovdqub(xmm2, k1, Address(condensed, 96), false,
+                 Assembler::AVX_512bit);
+    __ evmovdqub(xmm3, k1, Address(condensed, 144), false,
+                 Assembler::AVX_512bit);
+
+    __ evpermb(xmm4, k0, xmm20, xmm0, false, Assembler::AVX_512bit);
+    __ evpermb(xmm5, k0, xmm20, xmm1, false, Assembler::AVX_512bit);
+    __ evpermb(xmm6, k0, xmm20, xmm2, false, Assembler::AVX_512bit);
+    __ evpermb(xmm7, k0, xmm20, xmm3, false, Assembler::AVX_512bit);
+
+    __ evpsrlvw(xmm4, xmm4, xmm21, Assembler::AVX_512bit);
+    __ evpsrlvw(xmm5, xmm5, xmm21, Assembler::AVX_512bit);
+    __ evpsrlvw(xmm6, xmm6, xmm21, Assembler::AVX_512bit);
+    __ evpsrlvw(xmm7, xmm7, xmm21, Assembler::AVX_512bit);
+
+    __ evpandq(xmm0, xmm22, xmm4, Assembler::AVX_512bit);
+    __ evpandq(xmm1, xmm22, xmm5, Assembler::AVX_512bit);
+    __ evpandq(xmm2, xmm22, xmm6, Assembler::AVX_512bit);
+    __ evpandq(xmm3, xmm22, xmm7, Assembler::AVX_512bit);
+
+    store4regs(parsed, 0, xmm0_3, _masm);
+
+    __ addptr(condensed, 192);
+    __ addptr(parsed, 256);
+    __ subl(parsedLength, 128);
+    __ jcc(Assembler::greater, VBMILoop);
+
+    __ leave(); // required for proper stackwalking of RuntimeStub frame
+    __ mov64(rax, 0); // return 0
+    __ ret(0);
+
+    return start;
+  }
+
   __ lea(perms, ExternalAddress(kyberAvx512_12To16PermsAddr()));
 
   load4regs(xmm24_27, perms, 0, _masm);
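
Reading the new VBMI path above: ML-KEM packs two 12-bit coefficients into every 3 bytes of the condensed buffer; evpermb with the Dup table appears to duplicate bytes so each 16-bit lane sees its 3-byte group, evpsrlvw shifts even lanes by 0 and odd lanes by 4 per the Shift table, and evpandq masks each lane to 12 bits (0x0FFF). A scalar C++ reference of the same transform, assuming that interpretation (illustrative, not the stub's API):

    #include <cstdint>
    #include <cstddef>

    // Scalar model of the 12-to-16 unpack the VBMI loop performs. Every 3
    // input bytes b0,b1,b2 carry two 12-bit values (FIPS 203 ByteDecode12):
    //   lo = b0 | (b1 & 0x0F) << 8,   hi = (b1 >> 4) | (b2 << 4).
    void kyber12To16_scalar(const uint8_t* condensed, int16_t* parsed,
                            size_t pairs) {
      for (size_t i = 0; i < pairs; i++) {
        uint8_t b0 = condensed[3 * i];
        uint8_t b1 = condensed[3 * i + 1];
        uint8_t b2 = condensed[3 * i + 2];
        parsed[2 * i]     = (int16_t)(((b1 & 0x0F) << 8) | b0);          // shift 0, mask 0x0FFF
        parsed[2 * i + 1] = (int16_t)((((b2 << 4) | (b1 >> 4))) & 0x0FFF); // shift 4, mask 0x0FFF
      }
    }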
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2216,7 +2216,8 @@ void TemplateTable::resolve_cache_and_index_for_method(int byte_no,
   __ cmpl(temp, code); // have we resolved this bytecode?
 
   // Class initialization barrier for static methods
-  if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
+  if (bytecode() == Bytecodes::_invokestatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register method = temp;
     const Register klass = temp;
 
@@ -2264,8 +2265,8 @@ void TemplateTable::resolve_cache_and_index_for_field(int byte_no,
   __ cmpl(temp, code); // have we resolved this bytecode?
 
   // Class initialization barrier for static fields
-  if (VM_Version::supports_fast_class_init_checks() &&
-      (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic)) {
+  if (bytecode() == Bytecodes::_getstatic || bytecode() == Bytecodes::_putstatic) {
+    assert(VM_Version::supports_fast_class_init_checks(), "sanity");
     const Register field_holder = temp;
 
     __ jcc(Assembler::notEqual, L_clinit_barrier_slow);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2025 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -258,10 +258,18 @@ bool os::free_memory(physical_memory_size_type& value) {
   return Aix::available_memory(value);
 }
 
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+  return Aix::available_memory(value);
+}
+
 bool os::available_memory(physical_memory_size_type& value) {
   return Aix::available_memory(value);
 }
 
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+  return Aix::available_memory(value);
+}
+
 bool os::Aix::available_memory(physical_memory_size_type& value) {
   os::Aix::meminfo_t mi;
   if (os::Aix::get_meminfo(&mi)) {
@@ -273,6 +281,10 @@ bool os::Aix::available_memory(physical_memory_size_type& value) {
 }
 
 bool os::total_swap_space(physical_memory_size_type& value) {
+  return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
   perfstat_memory_total_t memory_info;
   if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
     return false;
@@ -282,6 +294,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
 }
 
 bool os::free_swap_space(physical_memory_size_type& value) {
+  return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
   perfstat_memory_total_t memory_info;
   if (libperfstat::perfstat_memory_total(nullptr, &memory_info, sizeof(perfstat_memory_total_t), 1) == -1) {
     return false;
@@ -294,6 +310,10 @@ physical_memory_size_type os::physical_memory() {
   return Aix::physical_memory();
 }
 
+physical_memory_size_type os::Machine::physical_memory() {
+  return Aix::physical_memory();
+}
+
 size_t os::rss() { return (size_t)0; }
 
 // Cpu architecture string
@@ -2264,6 +2284,10 @@ int os::active_processor_count() {
     return ActiveProcessorCount;
   }
 
+  return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
   return online_cpus;
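
Across the platform files in this diff, each bare os:: memory/swap/CPU query gains an os::Machine:: twin that always reports host-level values, and on Linux an os::Container:: layer is added as well; the bare os:: entry points keep their historical container-aware behavior by delegating. A condensed sketch of that delegation shape, under simplified signatures that are not the real os API:

    #include <cstdint>

    // Hypothetical model of the new layering: container answer preferred,
    // host (Machine) answer as the fallback.
    struct MemoryQueries {
      bool (*container_available)(uint64_t&); // cgroup-backed, may fail
      bool (*machine_available)(uint64_t&);   // host-backed, always answers
      bool is_containerized;

      bool available_memory(uint64_t& value) { // models os::available_memory
        if (is_containerized && container_available(value)) {
          return true;
        }
        return machine_available(value);       // os::Machine:: path
      }
    };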
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -137,10 +137,18 @@ bool os::available_memory(physical_memory_size_type& value) {
   return Bsd::available_memory(value);
 }
 
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+  return Bsd::available_memory(value);
+}
+
 bool os::free_memory(physical_memory_size_type& value) {
   return Bsd::available_memory(value);
 }
 
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+  return Bsd::available_memory(value);
+}
+
 // Available here means free. Note that this number is of no much use. As an estimate
 // for future memory pressure it is far too conservative, since MacOS will use a lot
 // of unused memory for caches, and return it willingly in case of needs.
@@ -181,6 +189,10 @@ void os::Bsd::print_uptime_info(outputStream* st) {
 }
 
 bool os::total_swap_space(physical_memory_size_type& value) {
+  return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
 #if defined(__APPLE__)
   struct xsw_usage vmusage;
   size_t size = sizeof(vmusage);
@@ -195,6 +207,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
 }
 
 bool os::free_swap_space(physical_memory_size_type& value) {
+  return Machine::free_swap_space(value);
+}
+
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
 #if defined(__APPLE__)
   struct xsw_usage vmusage;
   size_t size = sizeof(vmusage);
@@ -212,6 +228,10 @@ physical_memory_size_type os::physical_memory() {
   return Bsd::physical_memory();
 }
 
+physical_memory_size_type os::Machine::physical_memory() {
+  return Bsd::physical_memory();
+}
+
 size_t os::rss() {
   size_t rss = 0;
 #ifdef __APPLE__
@@ -2189,6 +2209,10 @@ int os::active_processor_count() {
     return ActiveProcessorCount;
   }
 
+  return Machine::active_processor_count();
+}
+
+int os::Machine::active_processor_count() {
   return _processor_count;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -631,22 +631,20 @@ void CgroupSubsystemFactory::cleanup(CgroupInfo* cg_infos) {
  * return:
  *    true if there were no errors. false otherwise.
  */
-bool CgroupSubsystem::active_processor_count(int& value) {
-  int cpu_count;
-  int result = -1;
-
+bool CgroupSubsystem::active_processor_count(double& value) {
   // We use a cache with a timeout to avoid performing expensive
   // computations in the event this function is called frequently.
   // [See 8227006].
-  CachingCgroupController<CgroupCpuController>* contrl = cpu_controller();
-  CachedMetric* cpu_limit = contrl->metrics_cache();
+  CachingCgroupController<CgroupCpuController, double>* contrl = cpu_controller();
+  CachedMetric<double>* cpu_limit = contrl->metrics_cache();
   if (!cpu_limit->should_check_metric()) {
-    value = (int)cpu_limit->value();
-    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %d", value);
+    value = cpu_limit->value();
+    log_trace(os, container)("CgroupSubsystem::active_processor_count (cached): %.2f", value);
    return true;
   }
 
-  cpu_count = os::Linux::active_processor_count();
+  int cpu_count = os::Linux::active_processor_count();
+  double result = -1;
   if (!CgroupUtil::processor_count(contrl->controller(), cpu_count, result)) {
     return false;
   }
@@ -671,8 +669,8 @@ bool CgroupSubsystem::active_processor_count(int& value) {
  */
 bool CgroupSubsystem::memory_limit_in_bytes(physical_memory_size_type upper_bound,
                                             physical_memory_size_type& value) {
-  CachingCgroupController<CgroupMemoryController>* contrl = memory_controller();
-  CachedMetric* memory_limit = contrl->metrics_cache();
+  CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* contrl = memory_controller();
+  CachedMetric<physical_memory_size_type>* memory_limit = contrl->metrics_cache();
   if (!memory_limit->should_check_metric()) {
     value = memory_limit->value();
     return true;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -181,9 +181,10 @@ class CgroupController: public CHeapObj<mtInternal> {
     static bool limit_from_str(char* limit_str, physical_memory_size_type& value);
 };
 
+template <typename MetricType>
 class CachedMetric : public CHeapObj<mtInternal>{
   private:
-    volatile physical_memory_size_type _metric;
+    volatile MetricType _metric;
     volatile jlong _next_check_counter;
   public:
     CachedMetric() {
@@ -193,8 +194,8 @@ class CachedMetric : public CHeapObj<mtInternal>{
     bool should_check_metric() {
      return os::elapsed_counter() > _next_check_counter;
     }
-    physical_memory_size_type value() { return _metric; }
-    void set_value(physical_memory_size_type value, jlong timeout) {
+    MetricType value() { return _metric; }
+    void set_value(MetricType value, jlong timeout) {
       _metric = value;
       // Metric is unlikely to change, but we want to remain
       // responsive to configuration changes. A very short grace time
@@ -205,19 +206,19 @@ class CachedMetric : public CHeapObj<mtInternal>{
     }
 };
 
-template <class T>
+template <class T, typename MetricType>
 class CachingCgroupController : public CHeapObj<mtInternal> {
   private:
     T* _controller;
-    CachedMetric* _metrics_cache;
+    CachedMetric<MetricType>* _metrics_cache;
 
   public:
     CachingCgroupController(T* cont) {
       _controller = cont;
-      _metrics_cache = new CachedMetric();
+      _metrics_cache = new CachedMetric<MetricType>();
     }
 
-    CachedMetric* metrics_cache() { return _metrics_cache; }
+    CachedMetric<MetricType>* metrics_cache() { return _metrics_cache; }
     T* controller() { return _controller; }
 };
 
@@ -277,7 +278,7 @@ class CgroupMemoryController: public CHeapObj<mtInternal> {
 class CgroupSubsystem: public CHeapObj<mtInternal> {
   public:
     bool memory_limit_in_bytes(physical_memory_size_type upper_bound, physical_memory_size_type& value);
-    bool active_processor_count(int& value);
+    bool active_processor_count(double& value);
 
     virtual bool pids_max(uint64_t& value) = 0;
     virtual bool pids_current(uint64_t& value) = 0;
@@ -286,8 +287,8 @@ class CgroupSubsystem: public CHeapObj<mtInternal> {
     virtual char * cpu_cpuset_cpus() = 0;
     virtual char * cpu_cpuset_memory_nodes() = 0;
     virtual const char * container_type() = 0;
-    virtual CachingCgroupController<CgroupMemoryController>* memory_controller() = 0;
-    virtual CachingCgroupController<CgroupCpuController>* cpu_controller() = 0;
+    virtual CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() = 0;
+    virtual CachingCgroupController<CgroupCpuController, double>* cpu_controller() = 0;
     virtual CgroupCpuacctController* cpuacct_controller() = 0;
 
     bool cpu_quota(int& value);
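
With the header change above, CachedMetric and CachingCgroupController are parameterized on the metric's type, so the CPU cache can hold a fractional double while the memory cache keeps physical_memory_size_type. A self-contained stand-in showing the same shape and how the two instantiations coexist (hypothetical model, std::chrono in place of os::elapsed_counter()):

    #include <cstdint>
    #include <chrono>

    // Simplified model of the templated CachedMetric above; same
    // value()/set_value()/should_check_metric() interface.
    template <typename MetricType>
    class CachedMetricModel {
      MetricType _metric{};
      std::chrono::steady_clock::time_point _next_check{};
    public:
      bool should_check_metric() const {
        return std::chrono::steady_clock::now() > _next_check;
      }
      MetricType value() const { return _metric; }
      void set_value(MetricType v, std::chrono::milliseconds timeout) {
        _metric = v;                      // cache the freshly computed metric
        _next_check = std::chrono::steady_clock::now() + timeout;
      }
    };

    // One cache implementation, two metric types:
    CachedMetricModel<double>   cpu_limit_cache;   // e.g. 0.5 CPUs
    CachedMetricModel<uint64_t> mem_limit_cache;   // e.g. 512 * 1024 * 1024 bytes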
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2024, 2025, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,9 +26,8 @@
 #include "cgroupUtil_linux.hpp"
 #include "os_linux.hpp"
 
-bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, int& value) {
+bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound, double& value) {
   assert(upper_bound > 0, "upper bound of cpus must be positive");
-  int limit_count = upper_bound;
   int quota = -1;
   int period = -1;
   if (!cpu_ctrl->cpu_quota(quota)) {
@@ -37,20 +37,15 @@ bool CgroupUtil::processor_count(CgroupCpuController* cpu_ctrl, int upper_bound,
     return false;
   }
-  int quota_count = 0;
-  int result = upper_bound;
+  double result = upper_bound;
 
-  if (quota > -1 && period > 0) {
-    quota_count = ceilf((float)quota / (float)period);
-    log_trace(os, container)("CPU Quota count based on quota/period: %d", quota_count);
+  if (quota > 0 && period > 0) { // Use quotas
+    double cpu_quota = static_cast<double>(quota) / period;
+    log_trace(os, container)("CPU Quota based on quota/period: %.2f", cpu_quota);
+    result = MIN2(result, cpu_quota);
   }
 
-  // Use quotas
-  if (quota_count != 0) {
-    limit_count = quota_count;
-  }
-
-  result = MIN2(upper_bound, limit_count);
-  log_trace(os, container)("OSContainer::active_processor_count: %d", result);
+  log_trace(os, container)("OSContainer::active_processor_count: %.2f", result);
   value = result;
   return true;
 }
@@ -73,11 +68,11 @@ physical_memory_size_type CgroupUtil::get_updated_mem_limit(CgroupMemoryControll
 
 // Get an updated cpu limit. The return value is strictly less than or equal to the
 // passed in 'lowest' value.
-int CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
+double CgroupUtil::get_updated_cpu_limit(CgroupCpuController* cpu,
                                       int lowest,
                                       int upper_bound) {
   assert(lowest > 0 && lowest <= upper_bound, "invariant");
-  int cpu_limit_val = -1;
+  double cpu_limit_val = -1;
   if (CgroupUtil::processor_count(cpu, upper_bound, cpu_limit_val) && cpu_limit_val != upper_bound) {
     assert(cpu_limit_val <= upper_bound, "invariant");
     if (lowest > cpu_limit_val) {
@@ -172,7 +167,7 @@ void CgroupUtil::adjust_controller(CgroupCpuController* cpu) {
   assert(cg_path[0] == '/', "cgroup path must start with '/'");
   int host_cpus = os::Linux::active_processor_count();
   int lowest_limit = host_cpus;
-  int cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
+  double cpus = get_updated_cpu_limit(cpu, lowest_limit, host_cpus);
   int orig_limit = lowest_limit != host_cpus ? lowest_limit : host_cpus;
   char* limit_cg_path = nullptr;
   while ((last_slash = strrchr(cg_path, '/')) != cg_path) {
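
The rewrite above keeps the cgroup CPU quota as a fraction instead of rounding it up on the spot: quota=50000 with period=100000 now yields 0.5 rather than ceilf(0.5)=1, and the rounding is deferred to os::active_processor_count(). A self-contained sketch mirroring the new computation (illustrative only):

    #include <algorithm>

    // Model of CgroupUtil::processor_count() after the change: the CPU limit
    // is a double, capped by the host CPU count (upper_bound).
    double processor_count_model(int quota, int period, int upper_bound) {
      double result = upper_bound;
      if (quota > 0 && period > 0) {                     // use quotas
        double cpu_quota = static_cast<double>(quota) / period;
        result = std::min(result, cpu_quota);
      }
      return result;
    }

    // e.g. quota=50000, period=100000 on an 8-CPU host:
    //   old code reported ceilf(0.5f) = 1 here;
    //   new code reports 0.5 here and rounds up later, at the os:: boundary.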
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2024, Red Hat, Inc.
+ * Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +32,7 @@
 class CgroupUtil: AllStatic {
 
   public:
-    static bool processor_count(CgroupCpuController* cpu, int upper_bound, int& value);
+    static bool processor_count(CgroupCpuController* cpu, int upper_bound, double& value);
     // Given a memory controller, adjust its path to a point in the hierarchy
     // that represents the closest memory limit.
     static void adjust_controller(CgroupMemoryController* m);
@@ -42,9 +43,7 @@ class CgroupUtil: AllStatic {
     static physical_memory_size_type get_updated_mem_limit(CgroupMemoryController* m,
                                                            physical_memory_size_type lowest,
                                                            physical_memory_size_type upper_bound);
-    static int get_updated_cpu_limit(CgroupCpuController* c,
-                                     int lowest,
-                                     int upper_bound);
+    static double get_updated_cpu_limit(CgroupCpuController* c, int lowest, int upper_bound);
 };
 
 #endif // CGROUP_UTIL_LINUX_HPP
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -328,8 +328,8 @@ CgroupV1Subsystem::CgroupV1Subsystem(CgroupV1Controller* cpuset,
     _pids(pids) {
   CgroupUtil::adjust_controller(memory);
   CgroupUtil::adjust_controller(cpu);
-  _memory = new CachingCgroupController<CgroupMemoryController>(memory);
-  _cpu = new CachingCgroupController<CgroupCpuController>(cpu);
+  _memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
+  _cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
 }
 
 bool CgroupV1Subsystem::is_containerized() {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -214,15 +214,15 @@ class CgroupV1Subsystem: public CgroupSubsystem {
     const char * container_type() override {
       return "cgroupv1";
     }
-    CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
-    CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
+    CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
+    CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
     CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; }
 
   private:
    /* controllers */
-    CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
+    CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
    CgroupV1Controller* _cpuset = nullptr;
-    CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
+    CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
     CgroupV1CpuacctController* _cpuacct = nullptr;
     CgroupV1Controller* _pids = nullptr;
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2020, 2025, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -156,8 +156,8 @@ CgroupV2Subsystem::CgroupV2Subsystem(CgroupV2MemoryController * memory,
     _unified(unified) {
   CgroupUtil::adjust_controller(memory);
   CgroupUtil::adjust_controller(cpu);
-  _memory = new CachingCgroupController<CgroupMemoryController>(memory);
-  _cpu = new CachingCgroupController<CgroupCpuController>(cpu);
+  _memory = new CachingCgroupController<CgroupMemoryController, physical_memory_size_type>(memory);
+  _cpu = new CachingCgroupController<CgroupCpuController, double>(cpu);
   _cpuacct = cpuacct;
 }
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2020, 2024, Red Hat Inc.
- * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -152,8 +152,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
     /* One unified controller */
     CgroupV2Controller _unified;
     /* Caching wrappers for cpu/memory metrics */
-    CachingCgroupController<CgroupMemoryController>* _memory = nullptr;
-    CachingCgroupController<CgroupCpuController>* _cpu = nullptr;
+    CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* _memory = nullptr;
+    CachingCgroupController<CgroupCpuController, double>* _cpu = nullptr;
 
     CgroupCpuacctController* _cpuacct = nullptr;
@@ -175,8 +175,8 @@ class CgroupV2Subsystem: public CgroupSubsystem {
     const char * container_type() override {
       return "cgroupv2";
     }
-    CachingCgroupController<CgroupMemoryController>* memory_controller() override { return _memory; }
-    CachingCgroupController<CgroupCpuController>* cpu_controller() override { return _cpu; }
+    CachingCgroupController<CgroupMemoryController, physical_memory_size_type>* memory_controller() override { return _memory; }
+    CachingCgroupController<CgroupCpuController, double>* cpu_controller() override { return _cpu; }
     CgroupCpuacctController* cpuacct_controller() override { return _cpuacct; };
 };
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -86,8 +86,8 @@ void OSContainer::init() {
   // 2.) On a physical Linux system with a limit enforced by other means (like systemd slice)
   physical_memory_size_type mem_limit_val = value_unlimited;
   (void)memory_limit_in_bytes(mem_limit_val); // discard error and use default
-  int host_cpus = os::Linux::active_processor_count();
-  int cpus = host_cpus;
+  double host_cpus = os::Linux::active_processor_count();
+  double cpus = host_cpus;
   (void)active_processor_count(cpus); // discard error and use default
   any_mem_cpu_limit_present = mem_limit_val != value_unlimited || host_cpus != cpus;
   if (any_mem_cpu_limit_present) {
@@ -127,8 +127,7 @@ bool OSContainer::available_memory_in_bytes(physical_memory_size_type& value) {
   return false;
 }
 
-bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_swap,
-                                          physical_memory_size_type& value) {
+bool OSContainer::available_swap_in_bytes(physical_memory_size_type& value) {
   physical_memory_size_type mem_limit = 0;
   physical_memory_size_type mem_swap_limit = 0;
   if (memory_limit_in_bytes(mem_limit) &&
@@ -179,8 +178,7 @@ bool OSContainer::available_swap_in_bytes(physical_memory_size_type host_free_sw
     assert(num < 25, "buffer too small");
     mem_limit_buf[num] = '\0';
     log_trace(os,container)("OSContainer::available_swap_in_bytes: container_swap_limit=%s"
-                            " container_mem_limit=%s, host_free_swap: " PHYS_MEM_TYPE_FORMAT,
-                            mem_swap_buf, mem_limit_buf, host_free_swap);
+                            " container_mem_limit=%s", mem_swap_buf, mem_limit_buf);
   }
   return false;
 }
@@ -252,7 +250,7 @@ char * OSContainer::cpu_cpuset_memory_nodes() {
   return cgroup_subsystem->cpu_cpuset_memory_nodes();
 }
 
-bool OSContainer::active_processor_count(int& value) {
+bool OSContainer::active_processor_count(double& value) {
   assert(cgroup_subsystem != nullptr, "cgroup subsystem not available");
   return cgroup_subsystem->active_processor_count(value);
 }
@@ -291,11 +289,13 @@ template<typename T> struct metric_fmt;
 template<> struct metric_fmt<unsigned long long int> { static constexpr const char* fmt = "%llu"; };
 template<> struct metric_fmt<unsigned long int> { static constexpr const char* fmt = "%lu"; };
 template<> struct metric_fmt<int> { static constexpr const char* fmt = "%d"; };
+template<> struct metric_fmt<double> { static constexpr const char* fmt = "%.2f"; };
 template<> struct metric_fmt<const char*> { static constexpr const char* fmt = "%s"; };
 
 template void OSContainer::print_container_metric<unsigned long long int>(outputStream*, const char*, unsigned long long int, const char*);
 template void OSContainer::print_container_metric<unsigned long int>(outputStream*, const char*, unsigned long int, const char*);
 template void OSContainer::print_container_metric<int>(outputStream*, const char*, int, const char*);
+template void OSContainer::print_container_metric<double>(outputStream*, const char*, double, const char*);
 template void OSContainer::print_container_metric<const char*>(outputStream*, const char*, const char*, const char*);
 
 template <typename T>
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,8 +72,7 @@ class OSContainer: AllStatic {
   static const char * container_type();
 
   static bool available_memory_in_bytes(physical_memory_size_type& value);
-  static bool available_swap_in_bytes(physical_memory_size_type host_free_swap,
-                                      physical_memory_size_type& value);
+  static bool available_swap_in_bytes(physical_memory_size_type& value);
   static bool memory_limit_in_bytes(physical_memory_size_type& value);
   static bool memory_and_swap_limit_in_bytes(physical_memory_size_type& value);
   static bool memory_and_swap_usage_in_bytes(physical_memory_size_type& value);
@@ -84,7 +83,7 @@ class OSContainer: AllStatic {
   static bool rss_usage_in_bytes(physical_memory_size_type& value);
   static bool cache_usage_in_bytes(physical_memory_size_type& value);
 
-  static bool active_processor_count(int& value);
+  static bool active_processor_count(double& value);
 
   static char * cpu_cpuset_cpus();
   static char * cpu_cpuset_memory_nodes();
@@ -211,15 +211,58 @@ static bool suppress_primordial_thread_resolution = false;
 
 // utility functions
 
 bool os::is_containerized() {
   return OSContainer::is_containerized();
 }
 
+bool os::Container::memory_limit(physical_memory_size_type& value) {
+  physical_memory_size_type result = 0;
+  if (OSContainer::memory_limit_in_bytes(result) && result != value_unlimited) {
+    value = result;
+    return true;
+  }
+  return false;
+}
+
+bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
+  physical_memory_size_type result = 0;
+  if (OSContainer::memory_soft_limit_in_bytes(result) && result != 0 && result != value_unlimited) {
+    value = result;
+    return true;
+  }
+  return false;
+}
+
+bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
+  physical_memory_size_type result = 0;
+  if (OSContainer::memory_throttle_limit_in_bytes(result) && result != value_unlimited) {
+    value = result;
+    return true;
+  }
+  return false;
+}
+
+bool os::Container::used_memory(physical_memory_size_type& value) {
+  return OSContainer::memory_usage_in_bytes(value);
+}
+
 bool os::available_memory(physical_memory_size_type& value) {
-  if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+  if (is_containerized() && Container::available_memory(value)) {
     log_trace(os)("available container memory: " PHYS_MEM_TYPE_FORMAT, value);
     return true;
   }
 
   return Machine::available_memory(value);
 }
 
+bool os::Machine::available_memory(physical_memory_size_type& value) {
+  return Linux::available_memory(value);
+}
+
+bool os::Container::available_memory(physical_memory_size_type& value) {
+  return OSContainer::available_memory_in_bytes(value);
+}
+
 bool os::Linux::available_memory(physical_memory_size_type& value) {
   physical_memory_size_type avail_mem = 0;
@@ -251,11 +294,15 @@ bool os::Linux::available_memory(physical_memory_size_type& value) {
 }
 
 bool os::free_memory(physical_memory_size_type& value) {
-  if (OSContainer::is_containerized() && OSContainer::available_memory_in_bytes(value)) {
+  if (is_containerized() && Container::available_memory(value)) {
     log_trace(os)("free container memory: " PHYS_MEM_TYPE_FORMAT, value);
     return true;
   }
 
   return Machine::free_memory(value);
 }
 
+bool os::Machine::free_memory(physical_memory_size_type& value) {
+  return Linux::free_memory(value);
+}
+
@@ -274,7 +321,17 @@ bool os::Linux::free_memory(physical_memory_size_type& value) {
 }
 
 bool os::total_swap_space(physical_memory_size_type& value) {
-  if (OSContainer::is_containerized()) {
+  if (is_containerized() && Container::total_swap_space(value)) {
+    return true;
+  } // fallback to the host swap space if the container value fails
+  return Machine::total_swap_space(value);
+}
+
+bool os::Machine::total_swap_space(physical_memory_size_type& value) {
+  return Linux::host_swap(value);
+}
+
+bool os::Container::total_swap_space(physical_memory_size_type& value) {
   physical_memory_size_type mem_swap_limit = value_unlimited;
   physical_memory_size_type memory_limit = value_unlimited;
   if (OSContainer::memory_and_swap_limit_in_bytes(mem_swap_limit) &&
@@ -285,8 +342,7 @@ bool os::total_swap_space(physical_memory_size_type& value) {
       return true;
     }
-  }
-  return Linux::host_swap(value);
+  } // fallback to the host swap space if the container returned unlimited
+  return false;
 }
 
 static bool host_free_swap_f(physical_memory_size_type& value) {
@@ -309,32 +365,45 @@ bool os::free_swap_space(physical_memory_size_type& value) {
     return false;
   }
   physical_memory_size_type host_free_swap_val = MIN2(total_swap_space, host_free_swap);
-  if (OSContainer::is_containerized()) {
-    if (OSContainer::available_swap_in_bytes(host_free_swap_val, value)) {
+  if (is_containerized()) {
+    if (Container::free_swap_space(value)) {
       return true;
     }
+    // Fall through to use host value
-    log_trace(os,container)("os::free_swap_space: containerized value unavailable"
-                            " returning host value: " PHYS_MEM_TYPE_FORMAT, host_free_swap_val);
   }
 
   value = host_free_swap_val;
   return true;
 }
 
+bool os::Machine::free_swap_space(physical_memory_size_type& value) {
+  return host_free_swap_f(value);
+}
+
+bool os::Container::free_swap_space(physical_memory_size_type& value) {
+  return OSContainer::available_swap_in_bytes(value);
+}
+
 physical_memory_size_type os::physical_memory() {
-  if (OSContainer::is_containerized()) {
+  if (is_containerized()) {
     physical_memory_size_type mem_limit = value_unlimited;
-    if (OSContainer::memory_limit_in_bytes(mem_limit) && mem_limit != value_unlimited) {
+    if (Container::memory_limit(mem_limit) && mem_limit != value_unlimited) {
       log_trace(os)("total container memory: " PHYS_MEM_TYPE_FORMAT, mem_limit);
       return mem_limit;
     }
   }
 
-  physical_memory_size_type phys_mem = Linux::physical_memory();
+  physical_memory_size_type phys_mem = Machine::physical_memory();
   log_trace(os)("total system memory: " PHYS_MEM_TYPE_FORMAT, phys_mem);
   return phys_mem;
 }
 
+physical_memory_size_type os::Machine::physical_memory() {
+  return Linux::physical_memory();
+}
+
 // Returns the resident set size (RSS) of the process.
 // Falls back to using VmRSS from /proc/self/status if /proc/self/smaps_rollup is unavailable.
 // Note: On kernels with memory cgroups or shared memory, VmRSS may underreport RSS.
@@ -2439,20 +2508,21 @@ bool os::Linux::print_container_info(outputStream* st) {
   OSContainer::print_container_metric(st, "cpu_memory_nodes", p != nullptr ? p : "not supported");
   free(p);
 
-  int i = -1;
-  bool supported = OSContainer::active_processor_count(i);
+  double cpus = -1;
+  bool supported = OSContainer::active_processor_count(cpus);
   if (supported) {
-    assert(i > 0, "must be");
+    assert(cpus > 0, "must be");
     if (ActiveProcessorCount > 0) {
       OSContainer::print_container_metric(st, "active_processor_count", ActiveProcessorCount, "(from -XX:ActiveProcessorCount)");
     } else {
-      OSContainer::print_container_metric(st, "active_processor_count", i);
+      OSContainer::print_container_metric(st, "active_processor_count", cpus);
     }
   } else {
     OSContainer::print_container_metric(st, "active_processor_count", "not supported");
   }
 
+  int i = -1;
   supported = OSContainer::cpu_quota(i);
   if (supported && i > 0) {
     OSContainer::print_container_metric(st, "cpu_quota", i);
@ -4737,15 +4807,26 @@ int os::active_processor_count() {
|
||||
return ActiveProcessorCount;
|
||||
}
|
||||
|
||||
int active_cpus = -1;
|
||||
if (OSContainer::is_containerized() && OSContainer::active_processor_count(active_cpus)) {
|
||||
if (is_containerized()) {
|
||||
double cpu_quota;
|
||||
if (Container::processor_count(cpu_quota)) {
|
||||
int active_cpus = ceilf(cpu_quota); // Round fractional CPU quota up.
|
||||
assert(active_cpus <= Machine::active_processor_count(), "must be");
|
||||
log_trace(os)("active_processor_count: determined by OSContainer: %d",
|
||||
active_cpus);
|
||||
} else {
|
||||
active_cpus = os::Linux::active_processor_count();
|
||||
return active_cpus;
|
||||
}
|
||||
}
|
||||
|
||||
return active_cpus;
|
||||
return Machine::active_processor_count();
|
||||
}
|
||||
|
||||
int os::Machine::active_processor_count() {
|
||||
return os::Linux::active_processor_count();
|
||||
}
|
||||
|
||||
bool os::Container::processor_count(double& value) {
|
||||
return OSContainer::active_processor_count(value);
|
||||
}
|
||||
|
||||
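Container::processor_count() now surfaces the quota as a double and the caller rounds it up with ceilf() from <math.h>, so a partially granted CPU still counts. A worked example (values illustrative):

// A cgroup quota of 150000us against a 100000us period is 1.5 CPUs.
double cpu_quota = 1.5;
int active_cpus = ceilf(cpu_quota); // rounds up to 2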
static bool should_warn_invalid_processor_id() {
@ -4882,9 +4963,14 @@ int os::open(const char *path, int oflag, int mode) {
oflag |= O_CLOEXEC;

int fd = ::open(path, oflag, mode);
if (fd == -1) return -1;
// No further checking is needed if open() returned an error or
// access mode is not read only.
if (fd == -1 || (oflag & O_ACCMODE) != O_RDONLY) {
return fd;
}

//If the open succeeded, the file might still be a directory
// If the open succeeded and is read only, the file might be a directory
// which the JVM doesn't allow to be read.
{
struct stat buf;
int ret = ::fstat(fd, &buf);
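The reworked early return keys off the POSIX access-mode bits: O_ACCMODE masks O_RDONLY/O_WRONLY/O_RDWR out of the flags, so only read-only opens pay for the fstat() directory check. A small sketch of the mask (illustrative):

// O_ACCMODE extracts the access mode from open(2) flags.
int flags = O_RDONLY | O_CLOEXEC;
bool read_only = (flags & O_ACCMODE) == O_RDONLY; // true here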
@ -839,10 +839,18 @@ bool os::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}

bool os::Machine::available_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}

bool os::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}

bool os::Machine::free_memory(physical_memory_size_type& value) {
return win32::available_memory(value);
}

bool os::win32::available_memory(physical_memory_size_type& value) {
// Use GlobalMemoryStatusEx() because GlobalMemoryStatus() may return incorrect
// value if total memory is larger than 4GB
@ -859,6 +867,10 @@ bool os::win32::available_memory(physical_memory_size_type& value) {
}
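The Windows queries below all go through GlobalMemoryStatusEx(), which requires dwLength to be initialized before the call. A hedged sketch of the calling convention (the exact field mapping is an assumption, not quoted from the patch):

MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms); // mandatory before the call
if (GlobalMemoryStatusEx(&ms)) {
  // e.g. ms.ullAvailPhys backs the memory queries and the
  // page-file counters back the swap-space queries
}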
bool os::total_swap_space(physical_memory_size_type& value) {
return Machine::total_swap_space(value);
}

bool os::Machine::total_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@ -872,6 +884,10 @@ bool os::total_swap_space(physical_memory_size_type& value) {
}

bool os::free_swap_space(physical_memory_size_type& value) {
return Machine::free_swap_space(value);
}

bool os::Machine::free_swap_space(physical_memory_size_type& value) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
BOOL res = GlobalMemoryStatusEx(&ms);
@ -888,6 +904,10 @@ physical_memory_size_type os::physical_memory() {
return win32::physical_memory();
}

physical_memory_size_type os::Machine::physical_memory() {
return win32::physical_memory();
}

size_t os::rss() {
size_t rss = 0;
PROCESS_MEMORY_COUNTERS_EX pmex;
@ -911,6 +931,10 @@ int os::active_processor_count() {
return ActiveProcessorCount;
}

return Machine::active_processor_count();
}

int os::Machine::active_processor_count() {
bool schedules_all_processor_groups = win32::is_windows_11_or_greater() || win32::is_windows_server_2022_or_greater();
if (UseAllWindowsProcessorGroups && !schedules_all_processor_groups && !win32::processor_group_warning_displayed()) {
win32::set_processor_group_warning_displayed(true);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -614,6 +614,10 @@ struct StringTableDeleteCheck : StackObj {
};

void StringTable::clean_dead_entries(JavaThread* jt) {
// BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
// When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
// Take SuspendMark first to keep lock order and avoid deadlock.
NativeHeapTrimmer::SuspendMark sm("stringtable");
StringTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@ -621,7 +625,6 @@ void StringTable::clean_dead_entries(JavaThread* jt) {

StringTableDeleteCheck stdc;
StringTableDoDelete stdd;
NativeHeapTrimmer::SuspendMark sm("stringtable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, stringtable, perf));
while(bdt.do_task(jt, stdc, stdd)) {
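Moving the SuspendMark ahead of the BulkDeleteTask enforces the usual deadlock-avoidance rule: whenever two locks may be held together, every thread acquires the higher-ranked one first. A minimal sketch of the ordering (ranks quoted from the comment above):

// NativeHeapTrimmer::_lock        rank nosafepoint   -- take first
// ConcurrentHashTableResize_lock  rank nosafepoint-2 -- take second
NativeHeapTrimmer::SuspendMark sm("stringtable"); // may take the trimmer lock
StringTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) { // may take the resize lock
  return;
}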
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -763,6 +763,10 @@ struct SymbolTableDeleteCheck : StackObj {
};

void SymbolTable::clean_dead_entries(JavaThread* jt) {
// BulkDeleteTask::prepare() may take ConcurrentHashTableResize_lock (nosafepoint-2).
// When NativeHeapTrimmer is enabled, SuspendMark may take NativeHeapTrimmer::_lock (nosafepoint).
// Take SuspendMark first to keep lock order and avoid deadlock.
NativeHeapTrimmer::SuspendMark sm("symboltable");
SymbolTableHash::BulkDeleteTask bdt(_local_table);
if (!bdt.prepare(jt)) {
return;
@ -770,7 +774,6 @@ void SymbolTable::clean_dead_entries(JavaThread* jt) {

SymbolTableDeleteCheck stdc;
SymbolTableDoDelete stdd;
NativeHeapTrimmer::SuspendMark sm("symboltable");
{
TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf));
while (bdt.do_task(jt, stdc, stdd)) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -44,20 +44,20 @@ G1CardTableClaimTable::~G1CardTableClaimTable() {

void G1CardTableClaimTable::initialize(uint max_reserved_regions) {
assert(_card_claims == nullptr, "Must not be initialized twice");
_card_claims = NEW_C_HEAP_ARRAY(uint, max_reserved_regions, mtGC);
_card_claims = NEW_C_HEAP_ARRAY(Atomic<uint>, max_reserved_regions, mtGC);
_max_reserved_regions = max_reserved_regions;
reset_all_to_unclaimed();
}

void G1CardTableClaimTable::reset_all_to_unclaimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
_card_claims[i] = 0;
_card_claims[i].store_relaxed(0);
}
}

void G1CardTableClaimTable::reset_all_to_claimed() {
for (uint i = 0; i < _max_reserved_regions; i++) {
_card_claims[i] = (uint)G1HeapRegion::CardsPerRegion;
_card_claims[i].store_relaxed((uint)G1HeapRegion::CardsPerRegion);
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@

#include "gc/g1/g1CardTable.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"

class G1HeapRegionClosure;

@ -45,7 +46,7 @@ class G1CardTableClaimTable : public CHeapObj<mtGC> {

// Card table iteration claim values for every heap region, from 0 (completely unclaimed)
// to (>=) G1HeapRegion::CardsPerRegion (completely claimed).
uint volatile* _card_claims;
Atomic<uint>* _card_claims;

uint _cards_per_chunk; // For conversion between card index and chunk index.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,26 +29,25 @@

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "runtime/atomicAccess.hpp"

bool G1CardTableClaimTable::has_unclaimed_cards(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return AtomicAccess::load(&_card_claims[region]) < G1HeapRegion::CardsPerRegion;
return _card_claims[region].load_relaxed() < G1HeapRegion::CardsPerRegion;
}

void G1CardTableClaimTable::reset_to_unclaimed(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
AtomicAccess::store(&_card_claims[region], 0u);
_card_claims[region].store_relaxed(0u);
}

uint G1CardTableClaimTable::claim_cards(uint region, uint increment) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return AtomicAccess::fetch_then_add(&_card_claims[region], increment, memory_order_relaxed);
return _card_claims[region].fetch_then_add(increment, memory_order_relaxed);
}

uint G1CardTableClaimTable::claim_chunk(uint region) {
assert(region < _max_reserved_regions, "Tried to access invalid region %u", region);
return AtomicAccess::fetch_then_add(&_card_claims[region], cards_per_chunk(), memory_order_relaxed);
return _card_claims[region].fetch_then_add(cards_per_chunk(), memory_order_relaxed);
}

uint G1CardTableClaimTable::claim_all_cards(uint region) {
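The claim operations tolerate racing over-increments: a thread that bumps the counter past CardsPerRegion just sees an out-of-range start index and backs off, so no CAS loop is needed. A hedged sketch of the consumer side (names illustrative):

// Illustrative user of claim_cards(); the claim counter is per region.
uint start = claim_table->claim_cards(region, increment);
if (start < G1HeapRegion::CardsPerRegion) {
  // cards [start, start + increment) now belong to this thread
} else {
  // another thread claimed past the end; nothing left in this region
}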
@ -103,7 +103,6 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,6 +54,7 @@
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/bitMap.hpp"
@ -124,7 +125,7 @@ class G1JavaThreadsListClaimer : public StackObj {
ThreadsListHandle _list;
uint _claim_step;

volatile uint _cur_claim;
Atomic<uint> _cur_claim;

// Attempts to claim _claim_step JavaThreads, returning an array of claimed
// JavaThread* with count elements. Returns null (and a zero count) if there

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,6 @@
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "utilities/bitMap.inline.hpp"

@ -53,10 +52,10 @@ inline bool G1STWIsAliveClosure::do_object_b(oop p) {

inline JavaThread* const* G1JavaThreadsListClaimer::claim(uint& count) {
count = 0;
if (AtomicAccess::load(&_cur_claim) >= _list.length()) {
if (_cur_claim.load_relaxed() >= _list.length()) {
return nullptr;
}
uint claim = AtomicAccess::fetch_then_add(&_cur_claim, _claim_step);
uint claim = _cur_claim.fetch_then_add(_claim_step);
if (claim >= _list.length()) {
return nullptr;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,7 +27,7 @@
#include "gc/g1/g1CollectionSetChooser.hpp"
#include "gc/g1/g1HeapRegionRemSet.inline.hpp"
#include "gc/shared/space.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"

// Determine collection set candidates (from marking): For all regions determine
@ -50,7 +50,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {

G1HeapRegion** _data;

uint volatile _cur_claim_idx;
Atomic<uint> _cur_claim_idx;

static int compare_region_gc_efficiency(G1HeapRegion** rr1, G1HeapRegion** rr2) {
G1HeapRegion* r1 = *rr1;
@ -105,7 +105,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {

// Claim a new chunk, returning its bounds [from, to[.
void claim_chunk(uint& from, uint& to) {
uint result = AtomicAccess::add(&_cur_claim_idx, _chunk_size);
uint result = _cur_claim_idx.add_then_fetch(_chunk_size);
assert(_max_size > result - 1,
"Array too small, is %u should be %u with chunk size %u.",
_max_size, result, _chunk_size);
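Note the two flavors in this change: add_then_fetch() returns the counter value after the addition, so the chunk claimed here is [result - _chunk_size, result), while the fetch_then_add() used elsewhere returns the value before. A worked contrast (hypothetical counter starting at 0):

Atomic<uint> c;                // starts at 0
uint a = c.add_then_fetch(8u); // a == 8, chunk is [a - 8, a) == [0, 8)
uint b = c.fetch_then_add(8u); // b == 8, chunk is [b, b + 8) == [8, 16)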
@ -121,14 +121,15 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
}

void sort_by_gc_efficiency() {
if (_cur_claim_idx == 0) {
uint length = _cur_claim_idx.load_relaxed();
if (length == 0) {
return;
}
for (uint i = _cur_claim_idx; i < _max_size; i++) {
for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
qsort(_data, _cur_claim_idx, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
for (uint i = _cur_claim_idx; i < _max_size; i++) {
qsort(_data, length, sizeof(_data[0]), (_sort_Fn)compare_region_gc_efficiency);
for (uint i = length; i < _max_size; i++) {
assert(_data[i] == nullptr, "must be");
}
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -67,7 +67,6 @@
#include "nmt/memTracker.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
@ -148,25 +147,25 @@ bool G1CMMarkStack::initialize() {
}

G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_chunk() {
if (_size >= _max_capacity) {
if (_size.load_relaxed() >= _max_capacity) {
return nullptr;
}

size_t cur_idx = AtomicAccess::fetch_then_add(&_size, 1u);
size_t cur_idx = _size.fetch_then_add(1u);

if (cur_idx >= _max_capacity) {
return nullptr;
}

size_t bucket = get_bucket(cur_idx);
if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
if (_buckets[bucket].load_acquire() == nullptr) {
if (!_should_grow) {
// Prefer to restart the CM.
return nullptr;
}

MutexLocker x(G1MarkStackChunkList_lock, Mutex::_no_safepoint_check_flag);
if (AtomicAccess::load_acquire(&_buckets[bucket]) == nullptr) {
if (_buckets[bucket].load_acquire() == nullptr) {
size_t desired_capacity = bucket_size(bucket) * 2;
if (!try_expand_to(desired_capacity)) {
return nullptr;
@ -175,7 +174,7 @@ G1CMMarkStack::TaskQueueEntryChunk* G1CMMarkStack::ChunkAllocator::allocate_new_
}

size_t bucket_idx = get_bucket_index(cur_idx);
TaskQueueEntryChunk* result = ::new (&_buckets[bucket][bucket_idx]) TaskQueueEntryChunk;
TaskQueueEntryChunk* result = ::new (&_buckets[bucket].load_relaxed()[bucket_idx]) TaskQueueEntryChunk;
result->next = nullptr;
return result;
}
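Bucket installation above is classic double-checked locking: an acquire-load fast path, a re-check under G1MarkStackChunkList_lock, and a release-store that publishes only a fully expanded bucket. A minimal sketch of the idiom (Bucket and allocate_and_initialize() are hypothetical):

Atomic<Bucket*> _slot;
Bucket* get_or_expand(Mutex* lock) {
  Bucket* b = _slot.load_acquire();
  if (b != nullptr) return b;      // fast path, no lock
  MutexLocker ml(lock, Mutex::_no_safepoint_check_flag);
  b = _slot.load_acquire();        // re-check under the lock
  if (b == nullptr) {
    b = allocate_and_initialize(); // construct fully first...
    _slot.release_store(b);        // ...then publish
  }
  return b;
}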
@ -197,10 +196,10 @@ bool G1CMMarkStack::ChunkAllocator::initialize(size_t initial_capacity, size_t m
_max_capacity = max_capacity;
_num_buckets = get_bucket(_max_capacity) + 1;

_buckets = NEW_C_HEAP_ARRAY(TaskQueueEntryChunk*, _num_buckets, mtGC);
_buckets = NEW_C_HEAP_ARRAY(Atomic<TaskQueueEntryChunk*>, _num_buckets, mtGC);

for (size_t i = 0; i < _num_buckets; i++) {
_buckets[i] = nullptr;
_buckets[i].store_relaxed(nullptr);
}

size_t new_capacity = bucket_size(0);
@ -240,9 +239,9 @@ G1CMMarkStack::ChunkAllocator::~ChunkAllocator() {
}

for (size_t i = 0; i < _num_buckets; i++) {
if (_buckets[i] != nullptr) {
MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i], bucket_size(i));
_buckets[i] = nullptr;
if (_buckets[i].load_relaxed() != nullptr) {
MmapArrayAllocator<TaskQueueEntryChunk>::free(_buckets[i].load_relaxed(), bucket_size(i));
_buckets[i].store_relaxed(nullptr);
}
}

@ -259,7 +258,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
// and the new capacity (new_capacity). This step ensures that there are no gaps in the
// array and that the capacity accurately reflects the reserved memory.
for (; i <= highest_bucket; i++) {
if (AtomicAccess::load_acquire(&_buckets[i]) != nullptr) {
if (_buckets[i].load_acquire() != nullptr) {
continue; // Skip over already allocated buckets.
}

@ -279,7 +278,7 @@ bool G1CMMarkStack::ChunkAllocator::reserve(size_t new_capacity) {
return false;
}
_capacity += bucket_capacity;
AtomicAccess::release_store(&_buckets[i], bucket_base);
_buckets[i].release_store(bucket_base);
}
return true;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,6 +37,7 @@
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/compilerWarnings.hpp"
#include "utilities/numberSeq.hpp"

@ -172,9 +173,9 @@ private:
size_t _capacity;
size_t _num_buckets;
bool _should_grow;
TaskQueueEntryChunk* volatile* _buckets;
Atomic<TaskQueueEntryChunk*>* _buckets;
char _pad0[DEFAULT_PADDING_SIZE];
volatile size_t _size;
Atomic<size_t> _size;
char _pad4[DEFAULT_PADDING_SIZE - sizeof(size_t)];

size_t bucket_size(size_t bucket) {
@ -212,7 +213,7 @@ private:
bool initialize(size_t initial_capacity, size_t max_capacity);

void reset() {
_size = 0;
_size.store_relaxed(0);
_should_grow = false;
}

@ -556,14 +557,14 @@ public:
// mark_in_bitmap call. Updates various statistics data.
void add_to_liveness(uint worker_id, oop const obj, size_t size);
// Did the last marking find a live object between bottom and TAMS?
bool contains_live_object(uint region) const { return _region_mark_stats[region]._live_words != 0; }
bool contains_live_object(uint region) const { return _region_mark_stats[region].live_words() != 0; }
// Live bytes in the given region as determined by concurrent marking, i.e. the amount of
// live bytes between bottom and TAMS.
size_t live_bytes(uint region) const { return _region_mark_stats[region]._live_words * HeapWordSize; }
size_t live_bytes(uint region) const { return _region_mark_stats[region].live_words() * HeapWordSize; }
// Set live bytes for concurrent marking.
void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words = live_bytes / HeapWordSize; }
void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words.store_relaxed(live_bytes / HeapWordSize); }
// Approximate number of incoming references found during marking.
size_t incoming_refs(uint region) const { return _region_mark_stats[region]._incoming_refs; }
size_t incoming_refs(uint region) const { return _region_mark_stats[region].incoming_refs(); }

// Update the TAMS for the given region to the current top.
inline void update_top_at_mark_start(G1HeapRegion* r);
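The accessors convert between words and bytes with HeapWordSize (8 on a 64-bit VM), so the stored word count round-trips exactly through set_live_bytes()/live_bytes(). A worked check (values illustrative):

size_t words = 1024;
size_t bytes = words * HeapWordSize;                       // 8192 on 64-bit
assert(bytes / HeapWordSize == words, "exact round-trip");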
@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,7 +29,6 @@
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp"

G1EvacFailureRegions::G1EvacFailureRegions() :
@ -43,7 +43,7 @@ G1EvacFailureRegions::~G1EvacFailureRegions() {
}

void G1EvacFailureRegions::pre_collection(uint max_regions) {
AtomicAccess::store(&_num_regions_evac_failed, 0u);
_num_regions_evac_failed.store_relaxed(0u);
_regions_evac_failed.resize(max_regions);
_regions_pinned.resize(max_regions);
_regions_alloc_failed.resize(max_regions);
@ -69,6 +69,6 @@ void G1EvacFailureRegions::par_iterate(G1HeapRegionClosure* closure,
G1CollectedHeap::heap()->par_iterate_regions_array(closure,
hrclaimer,
_evac_failed_regions,
AtomicAccess::load(&_num_regions_evac_failed),
num_regions_evac_failed(),
worker_id);
}

@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,6 +26,7 @@
#ifndef SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP
#define SHARE_GC_G1_G1EVACFAILUREREGIONS_HPP

#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"

class G1AbstractSubTask;
@ -53,14 +55,14 @@ class G1EvacFailureRegions {
// Evacuation failed regions (indexes) in the current collection.
uint* _evac_failed_regions;
// Number of regions evacuation failed in the current collection.
volatile uint _num_regions_evac_failed;
Atomic<uint> _num_regions_evac_failed;

public:
G1EvacFailureRegions();
~G1EvacFailureRegions();

uint get_region_idx(uint idx) const {
assert(idx < _num_regions_evac_failed, "precondition");
assert(idx < _num_regions_evac_failed.load_relaxed(), "precondition");
return _evac_failed_regions[idx];
}

@ -1,5 +1,6 @@
/*
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* Copyright (c) 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,10 +30,9 @@

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "runtime/atomicAccess.hpp"

uint G1EvacFailureRegions::num_regions_evac_failed() const {
return AtomicAccess::load(&_num_regions_evac_failed);
return _num_regions_evac_failed.load_relaxed();
}

bool G1EvacFailureRegions::has_regions_evac_failed() const {
@ -57,7 +57,7 @@ bool G1EvacFailureRegions::record(uint worker_id, uint region_idx, bool cause_pi
bool success = _regions_evac_failed.par_set_bit(region_idx,
memory_order_relaxed);
if (success) {
size_t offset = AtomicAccess::fetch_then_add(&_num_regions_evac_failed, 1u);
size_t offset = _num_regions_evac_failed.fetch_then_add(1u);
_evac_failed_regions[offset] = region_idx;

G1CollectedHeap* g1h = G1CollectedHeap::heap();

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -122,7 +122,7 @@ public:
ReferenceProcessor* reference_processor();
size_t live_words(uint region_index) const {
assert(region_index < _heap->max_num_regions(), "sanity");
return _live_stats[region_index]._live_words;
return _live_stats[region_index].live_words();
}

void before_marking_update_attribute_table(G1HeapRegion* hr);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,6 @@
#include "gc/shared/weakProcessor.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "runtime/atomicAccess.hpp"

class G1AdjustLiveClosure : public StackObj {
G1AdjustClosure* _adjust_closure;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,6 @@
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"

@ -30,7 +30,6 @@
#include "gc/g1/g1CodeRootSet.hpp"
#include "gc/g1/g1CollectionSetCandidates.hpp"
#include "gc/g1/g1FromCardCache.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/bitMap.hpp"

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,6 @@
#include "gc/g1/g1CardSet.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapRegion.inline.hpp"
#include "runtime/atomicAccess.hpp"
#include "utilities/bitMap.inline.hpp"

void G1HeapRegionRemSet::set_state_untracked() {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2022, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -24,7 +24,6 @@

#include "gc/g1/g1MonotonicArena.inline.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/globalCounter.inline.hpp"

@ -61,13 +60,13 @@ void G1MonotonicArena::SegmentFreeList::bulk_add(Segment& first,
size_t num,
size_t mem_size) {
_list.prepend(first, last);
AtomicAccess::add(&_num_segments, num, memory_order_relaxed);
AtomicAccess::add(&_mem_size, mem_size, memory_order_relaxed);
_num_segments.add_then_fetch(num, memory_order_relaxed);
_mem_size.add_then_fetch(mem_size, memory_order_relaxed);
}

void G1MonotonicArena::SegmentFreeList::print_on(outputStream* out, const char* prefix) {
out->print_cr("%s: segments %zu size %zu",
prefix, AtomicAccess::load(&_num_segments), AtomicAccess::load(&_mem_size));
prefix, _num_segments.load_relaxed(), _mem_size.load_relaxed());
}

G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& num_segments,
@ -75,12 +74,12 @@ G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get_all(size_t& nu
GlobalCounter::CriticalSection cs(Thread::current());

Segment* result = _list.pop_all();
num_segments = AtomicAccess::load(&_num_segments);
mem_size = AtomicAccess::load(&_mem_size);
num_segments = _num_segments.load_relaxed();
mem_size = _mem_size.load_relaxed();

if (result != nullptr) {
AtomicAccess::sub(&_num_segments, num_segments, memory_order_relaxed);
AtomicAccess::sub(&_mem_size, mem_size, memory_order_relaxed);
_num_segments.sub_then_fetch(num_segments, memory_order_relaxed);
_mem_size.sub_then_fetch(mem_size, memory_order_relaxed);
}
return result;
}
@ -96,8 +95,8 @@ void G1MonotonicArena::SegmentFreeList::free_all() {
Segment::delete_segment(cur);
}

AtomicAccess::sub(&_num_segments, num_freed, memory_order_relaxed);
AtomicAccess::sub(&_mem_size, mem_size_freed, memory_order_relaxed);
_num_segments.sub_then_fetch(num_freed, memory_order_relaxed);
_mem_size.sub_then_fetch(mem_size_freed, memory_order_relaxed);
}

G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
@ -115,7 +114,7 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
}

// Install it as current allocation segment.
Segment* old = AtomicAccess::cmpxchg(&_first, prev, next);
Segment* old = _first.compare_exchange(prev, next);
if (old != prev) {
// Somebody else installed the segment, use that one.
Segment::delete_segment(next);
@ -126,9 +125,9 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
_last = next;
}
// Successfully installed the segment into the list.
AtomicAccess::inc(&_num_segments, memory_order_relaxed);
AtomicAccess::add(&_mem_size, next->mem_size(), memory_order_relaxed);
AtomicAccess::add(&_num_total_slots, next->num_slots(), memory_order_relaxed);
_num_segments.add_then_fetch(1u, memory_order_relaxed);
_mem_size.add_then_fetch(next->mem_size(), memory_order_relaxed);
_num_total_slots.add_then_fetch(next->num_slots(), memory_order_relaxed);
return next;
}
}
@ -155,7 +154,7 @@ uint G1MonotonicArena::slot_size() const {
}

void G1MonotonicArena::drop_all() {
Segment* cur = AtomicAccess::load_acquire(&_first);
Segment* cur = _first.load_acquire();

if (cur != nullptr) {
assert(_last != nullptr, "If there is at least one segment, there must be a last one.");
@ -175,25 +174,25 @@ void G1MonotonicArena::drop_all() {
cur = next;
}
#endif
assert(num_segments == _num_segments, "Segment count inconsistent %u %u", num_segments, _num_segments);
assert(mem_size == _mem_size, "Memory size inconsistent");
assert(num_segments == _num_segments.load_relaxed(), "Segment count inconsistent %u %u", num_segments, _num_segments.load_relaxed());
assert(mem_size == _mem_size.load_relaxed(), "Memory size inconsistent");
assert(last == _last, "Inconsistent last segment");

_segment_free_list->bulk_add(*first, *_last, _num_segments, _mem_size);
_segment_free_list->bulk_add(*first, *_last, _num_segments.load_relaxed(), _mem_size.load_relaxed());
}

_first = nullptr;
_first.store_relaxed(nullptr);
_last = nullptr;
_num_segments = 0;
_mem_size = 0;
_num_total_slots = 0;
_num_allocated_slots = 0;
_num_segments.store_relaxed(0);
_mem_size.store_relaxed(0);
_num_total_slots.store_relaxed(0);
_num_allocated_slots.store_relaxed(0);
}

void* G1MonotonicArena::allocate() {
assert(slot_size() > 0, "instance size not set.");

Segment* cur = AtomicAccess::load_acquire(&_first);
Segment* cur = _first.load_acquire();
if (cur == nullptr) {
cur = new_segment(cur);
}
@ -201,7 +200,7 @@ void* G1MonotonicArena::allocate() {
while (true) {
void* slot = cur->allocate_slot();
if (slot != nullptr) {
AtomicAccess::inc(&_num_allocated_slots, memory_order_relaxed);
_num_allocated_slots.add_then_fetch(1u, memory_order_relaxed);
guarantee(is_aligned(slot, _alloc_options->slot_alignment()),
"result " PTR_FORMAT " not aligned at %u", p2i(slot), _alloc_options->slot_alignment());
return slot;
@ -213,7 +212,7 @@ void* G1MonotonicArena::allocate() {
}

uint G1MonotonicArena::num_segments() const {
return AtomicAccess::load(&_num_segments);
return _num_segments.load_relaxed();
}

#ifdef ASSERT
@ -238,7 +237,7 @@ uint G1MonotonicArena::calculate_length() const {

template <typename SegmentClosure>
void G1MonotonicArena::iterate_segments(SegmentClosure& closure) const {
Segment* cur = AtomicAccess::load_acquire(&_first);
Segment* cur = _first.load_acquire();

assert((cur != nullptr) == (_last != nullptr),
"If there is at least one segment, there must be a last one");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -28,6 +28,7 @@

#include "gc/shared/freeListAllocator.hpp"
#include "nmt/memTag.hpp"
#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/lockFreeStack.hpp"

@ -65,27 +66,27 @@ private:
// AllocOptions provides parameters for Segment sizing and expansion.
const AllocOptions* _alloc_options;

Segment* volatile _first; // The (start of the) list of all segments.
Atomic<Segment*> _first; // The (start of the) list of all segments.
Segment* _last; // The last segment of the list of all segments.
volatile uint _num_segments; // Number of assigned segments to this allocator.
volatile size_t _mem_size; // Memory used by all segments.
Atomic<uint> _num_segments; // Number of assigned segments to this allocator.
Atomic<size_t> _mem_size; // Memory used by all segments.

SegmentFreeList* _segment_free_list; // The global free segment list to preferentially
// get new segments from.

volatile uint _num_total_slots; // Number of slots available in all segments (allocated + not yet used).
volatile uint _num_allocated_slots; // Number of total slots allocated ever (including free and pending).
Atomic<uint> _num_total_slots; // Number of slots available in all segments (allocated + not yet used).
Atomic<uint> _num_allocated_slots; // Number of total slots allocated ever (including free and pending).

inline Segment* new_segment(Segment* const prev);

DEBUG_ONLY(uint calculate_length() const;)

public:
const Segment* first_segment() const { return AtomicAccess::load(&_first); }
const Segment* first_segment() const { return _first.load_relaxed(); }

uint num_total_slots() const { return AtomicAccess::load(&_num_total_slots); }
uint num_total_slots() const { return _num_total_slots.load_relaxed(); }
uint num_allocated_slots() const {
uint allocated = AtomicAccess::load(&_num_allocated_slots);
uint allocated = _num_allocated_slots.load_relaxed();
assert(calculate_length() == allocated, "Must be");
return allocated;
}
@ -116,11 +117,11 @@ static constexpr uint SegmentPayloadMaxAlignment = 8;
class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
const uint _slot_size;
const uint _num_slots;
Segment* volatile _next;
Atomic<Segment*> _next;
// Index into the next free slot to allocate into. Full if equal (or larger)
// to _num_slots (can be larger because we atomically increment this value and
// check only afterwards if the allocation has been successful).
uint volatile _next_allocate;
Atomic<uint> _next_allocate;
const MemTag _mem_tag;

static size_t header_size() { return align_up(sizeof(Segment), SegmentPayloadMaxAlignment); }
@ -139,21 +140,21 @@ class alignas(SegmentPayloadMaxAlignment) G1MonotonicArena::Segment {
Segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
~Segment() = default;
public:
Segment* volatile* next_addr() { return &_next; }
Atomic<Segment*>* next_addr() { return &_next; }

void* allocate_slot();

uint num_slots() const { return _num_slots; }

Segment* next() const { return _next; }
Segment* next() const { return _next.load_relaxed(); }

void set_next(Segment* next) {
assert(next != this, " loop condition");
_next = next;
_next.store_relaxed(next);
}

void reset(Segment* next) {
_next_allocate = 0;
_next_allocate.store_relaxed(0);
assert(next != this, " loop condition");
set_next(next);
memset(payload(0), 0, payload_size());
@ -166,7 +167,7 @@ public:
uint length() const {
// _next_allocate might grow larger than _num_slots in multi-thread environments
// due to races.
return MIN2(_next_allocate, _num_slots);
return MIN2(_next_allocate.load_relaxed(), _num_slots);
}
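The clamp is needed because two racing allocators can both pass the pre-check and push _next_allocate past _num_slots; the loser is rejected in allocate_slot(), but the raw counter stays above the limit. A worked trace (hypothetical, _num_slots == 4 and _next_allocate == 3):

// T1: fetch_then_add(1u) -> 3  valid (3 < 4): T1 gets the last slot
// T2: fetch_then_add(1u) -> 4  rejected (4 >= 4): T2 sees nullptr
// _next_allocate is now 5; length() reports MIN2(5, 4) == 4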
static size_t size_in_bytes(uint slot_size, uint num_slots) {
|
||||
@ -176,7 +177,7 @@ public:
|
||||
static Segment* create_segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
|
||||
static void delete_segment(Segment* segment);
|
||||
|
||||
bool is_full() const { return _next_allocate >= _num_slots; }
|
||||
bool is_full() const { return _next_allocate.load_relaxed() >= _num_slots; }
|
||||
};
|
||||
|
||||
static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment, "assert alignment of Segment (and indirectly its payload)");
|
||||
@ -186,15 +187,15 @@ static_assert(alignof(G1MonotonicArena::Segment) >= SegmentPayloadMaxAlignment,
|
||||
// performed by multiple threads concurrently.
|
||||
// Counts and memory usage are current on a best-effort basis if accessed concurrently.
|
||||
class G1MonotonicArena::SegmentFreeList {
|
||||
static Segment* volatile* next_ptr(Segment& segment) {
|
||||
static Atomic<Segment*>* next_ptr(Segment& segment) {
|
||||
return segment.next_addr();
|
||||
}
|
||||
using SegmentStack = LockFreeStack<Segment, &SegmentFreeList::next_ptr>;
|
||||
|
||||
SegmentStack _list;
|
||||
|
||||
volatile size_t _num_segments;
|
||||
volatile size_t _mem_size;
|
||||
Atomic<size_t> _num_segments;
|
||||
Atomic<size_t> _mem_size;
|
||||
|
||||
public:
|
||||
SegmentFreeList() : _list(), _num_segments(0), _mem_size(0) { }
|
||||
@ -210,8 +211,8 @@ public:
|
||||
|
||||
void print_on(outputStream* out, const char* prefix = "");
|
||||
|
||||
size_t num_segments() const { return AtomicAccess::load(&_num_segments); }
|
||||
size_t mem_size() const { return AtomicAccess::load(&_mem_size); }
|
||||
size_t num_segments() const { return _num_segments.load_relaxed(); }
|
||||
size_t mem_size() const { return _mem_size.load_relaxed(); }
|
||||
};
|
||||
|
||||
// Configuration for G1MonotonicArena, e.g slot size, slot number of next Segment.
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -28,14 +28,13 @@
|
||||
|
||||
#include "gc/g1/g1MonotonicArena.hpp"
|
||||
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "utilities/globalCounter.inline.hpp"
|
||||
|
||||
inline void* G1MonotonicArena::Segment::allocate_slot() {
|
||||
if (_next_allocate >= _num_slots) {
|
||||
if (_next_allocate.load_relaxed() >= _num_slots) {
|
||||
return nullptr;
|
||||
}
|
||||
uint result = AtomicAccess::fetch_then_add(&_next_allocate, 1u, memory_order_relaxed);
|
||||
uint result = _next_allocate.fetch_then_add(1u, memory_order_relaxed);
|
||||
if (result >= _num_slots) {
|
||||
return nullptr;
|
||||
}
|
||||
@ -48,8 +47,8 @@ inline G1MonotonicArena::Segment* G1MonotonicArena::SegmentFreeList::get() {
|
||||
|
||||
Segment* result = _list.pop();
|
||||
if (result != nullptr) {
|
||||
AtomicAccess::dec(&_num_segments, memory_order_relaxed);
|
||||
AtomicAccess::sub(&_mem_size, result->mem_size(), memory_order_relaxed);
|
||||
_num_segments.sub_then_fetch(1u, memory_order_relaxed);
|
||||
_mem_size.sub_then_fetch(result->mem_size(), memory_order_relaxed);
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,7 +28,6 @@
|
||||
#include "nmt/memTracker.hpp"
|
||||
#include "oops/markWord.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2014, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -42,7 +42,6 @@
|
||||
#include "memory/allocation.inline.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
#include "runtime/mutexLocker.hpp"
|
||||
#include "runtime/prefetch.inline.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -943,7 +943,7 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
|
||||
phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSToYoungGenCards));
|
||||
}
|
||||
|
||||
record_pause(this_pause, start_time_sec, end_time_sec, allocation_failure);
|
||||
record_pause(this_pause, start_time_sec, end_time_sec);
|
||||
|
||||
if (G1GCPauseTypeHelper::is_last_young_pause(this_pause)) {
|
||||
assert(!G1GCPauseTypeHelper::is_concurrent_start_pause(this_pause),
|
||||
@ -1389,16 +1389,13 @@ void G1Policy::update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_t
|
||||
|
||||
void G1Policy::record_pause(G1GCPauseType gc_type,
|
||||
double start,
|
||||
double end,
|
||||
bool allocation_failure) {
|
||||
double end) {
|
||||
// Manage the MMU tracker. For some reason it ignores Full GCs.
|
||||
if (gc_type != G1GCPauseType::FullGC) {
|
||||
_mmu_tracker->add_pause(start, end);
|
||||
}
|
||||
|
||||
if (!allocation_failure) {
|
||||
update_gc_pause_time_ratios(gc_type, start, end);
|
||||
}
|
||||
|
||||
update_time_to_mixed_tracking(gc_type, start, end);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -275,8 +275,7 @@ private:
|
||||
// Record the given STW pause with the given start and end times (in s).
|
||||
void record_pause(G1GCPauseType gc_type,
|
||||
double start,
|
||||
double end,
|
||||
bool allocation_failure = false);
|
||||
double end);
|
||||
|
||||
void update_gc_pause_time_ratios(G1GCPauseType gc_type, double start_sec, double end_sec);
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,6 +27,7 @@
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/pair.hpp"
|
||||
@ -40,20 +41,23 @@
|
||||
// * the number of incoming references found during marking. This is an approximate
|
||||
// value because we do not mark through all objects.
|
||||
struct G1RegionMarkStats {
|
||||
size_t _live_words;
|
||||
size_t _incoming_refs;
|
||||
Atomic<size_t> _live_words;
|
||||
Atomic<size_t> _incoming_refs;
|
||||
|
||||
// Clear all members.
|
||||
void clear() {
|
||||
_live_words = 0;
|
||||
_incoming_refs = 0;
|
||||
_live_words.store_relaxed(0);
|
||||
_incoming_refs.store_relaxed(0);
|
||||
}
|
||||
// Clear all members after a marking overflow. Only needs to clear the number of
|
||||
// incoming references as all objects will be rescanned, while the live words are
|
||||
// gathered whenever a thread can mark an object, which is synchronized.
|
||||
void clear_during_overflow() {
|
||||
_incoming_refs = 0;
|
||||
_incoming_refs.store_relaxed(0);
|
||||
}
|
||||
|
||||
size_t live_words() const { return _live_words.load_relaxed(); }
|
||||
size_t incoming_refs() const { return _incoming_refs.load_relaxed(); }
|
||||
};
|
||||
|
||||
// Per-marking thread cache for the region mark statistics.
|
||||
@ -112,12 +116,16 @@ public:
|
||||
void add_live_words(oop obj);
|
||||
void add_live_words(uint region_idx, size_t live_words) {
|
||||
G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
|
||||
cur->_stats._live_words += live_words;
|
||||
// This method is only ever called single-threaded, so we do not need atomic
|
||||
// update here.
|
||||
cur->_stats._live_words.store_relaxed(cur->_stats.live_words() + live_words);
|
||||
}
|
||||
|
||||
void inc_incoming_refs(uint region_idx) {
|
||||
G1RegionMarkStatsCacheEntry* const cur = find_for_add(region_idx);
|
||||
cur->_stats._incoming_refs++;
|
||||
// This method is only ever called single-threaded, so we do not need atomic
|
||||
// update here.
|
||||
cur->_stats._incoming_refs.store_relaxed(cur->_stats.incoming_refs() + 1u);
|
||||
}
|
||||
|
||||
void reset(uint region_idx) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,8 +27,6 @@
|
||||
|
||||
#include "gc/g1/g1RegionMarkStatsCache.hpp"
|
||||
|
||||
#include "runtime/atomicAccess.hpp"
|
||||
|
||||
inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCache::find_for_add(uint region_idx) {
|
||||
uint const cache_idx = hash(region_idx);
|
||||
|
||||
@ -46,12 +44,12 @@ inline G1RegionMarkStatsCache::G1RegionMarkStatsCacheEntry* G1RegionMarkStatsCac
|
||||
|
||||
inline void G1RegionMarkStatsCache::evict(uint idx) {
|
||||
G1RegionMarkStatsCacheEntry* cur = &_cache[idx];
|
||||
if (cur->_stats._live_words != 0) {
|
||||
AtomicAccess::add(&_target[cur->_region_idx]._live_words, cur->_stats._live_words);
|
||||
if (cur->_stats.live_words() != 0) {
|
||||
_target[cur->_region_idx]._live_words.add_then_fetch(cur->_stats.live_words());
|
||||
}
|
||||
|
||||
if (cur->_stats._incoming_refs != 0) {
|
||||
AtomicAccess::add(&_target[cur->_region_idx]._incoming_refs, cur->_stats._incoming_refs);
|
||||
if (cur->_stats.incoming_refs() != 0) {
|
||||
_target[cur->_region_idx]._incoming_refs.add_then_fetch(cur->_stats.incoming_refs());
|
||||
}
|
||||
|
||||
cur->clear();
|
||||
|
||||
@ -96,8 +96,22 @@ void WorkerThreads::initialize_workers() {
|
||||
}
|
||||
}
|
||||
|
||||
bool WorkerThreads::allow_inject_creation_failure() const {
|
||||
if (!is_init_completed()) {
|
||||
// Never allow creation failures during VM init
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_created_workers == 0) {
|
||||
// Never allow creation failures of the first worker, it will cause the VM to exit
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
WorkerThread* WorkerThreads::create_worker(uint name_suffix) {
|
||||
if (is_init_completed() && InjectGCWorkerCreationFailure) {
|
||||
if (InjectGCWorkerCreationFailure && allow_inject_creation_failure()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
||||
@ -104,6 +104,7 @@ public:
|
||||
WorkerThreads(const char* name, uint max_workers);
|
||||
|
||||
void initialize_workers();
|
||||
bool allow_inject_creation_failure() const;
|
||||
|
||||
uint max_workers() const { return _max_workers; }
|
||||
uint created_workers() const { return _created_workers; }
|
||||
|
||||
@ -129,6 +129,13 @@ protected:
|
||||
#endif
|
||||
}
|
||||
|
||||
inline void update_livedata(size_t live) {
|
||||
_region_union._live_data = live;
|
||||
#ifdef ASSERT
|
||||
_union_tag = is_live_data;
|
||||
#endif
|
||||
}
|
||||
|
||||
inline ShenandoahHeapRegion* get_region() const {
|
||||
assert(_union_tag != is_uninitialized, "Cannot fetch region from uninitialized RegionData");
|
||||
return _region;
|
||||
|
||||
@ -89,6 +89,17 @@ bool ShenandoahOldHeuristics::prime_collection_set(ShenandoahCollectionSet* coll
|
||||
return false;
|
||||
}
|
||||
|
||||
// Between consecutive mixed-evacuation cycles, the live data within each candidate region may change due to
|
||||
// promotions and old-gen evacuations. Re-sort the candidate regions in order to first evacuate regions that have
|
||||
// the smallest amount of live data. These are easiest to evacuate with least effort. Doing these first allows
|
||||
// us to more quickly replenish free memory with empty regions.
|
||||
for (uint i = _next_old_collection_candidate; i < _last_old_collection_candidate; i++) {
|
||||
ShenandoahHeapRegion* r = _region_data[i].get_region();
|
||||
_region_data[i].update_livedata(r->get_mixed_candidate_live_data_bytes());
|
||||
}
|
||||
QuickSort::sort<RegionData>(_region_data + _next_old_collection_candidate, unprocessed_old_collection_candidates(),
|
||||
compare_by_live);
|
||||
|
||||
_first_pinned_candidate = NOT_FOUND;
|
||||
|
||||
uint included_old_regions = 0;
|
||||
@ -414,6 +425,8 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
ShenandoahHeapRegion* r = candidates[i].get_region();
|
||||
size_t region_garbage = r->garbage();
|
||||
size_t region_free = r->free();
|
||||
|
||||
r->capture_mixed_candidate_garbage();
|
||||
candidates_garbage += region_garbage;
|
||||
unfragmented += region_free;
|
||||
}
|
||||
@ -456,6 +469,8 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() {
|
||||
r->index(), ShenandoahHeapRegion::region_state_to_string(r->state()));
|
||||
const size_t region_garbage = r->garbage();
|
||||
const size_t region_free = r->free();
|
||||
|
||||
r->capture_mixed_candidate_garbage();
|
||||
candidates_garbage += region_garbage;
|
||||
unfragmented += region_free;
|
||||
defrag_count++;
|
||||
|
||||
@ -75,6 +75,7 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool c
    _plab_allocs(0),
    _live_data(0),
    _critical_pins(0),
    _mixed_candidate_garbage_words(0),
    _update_watermark(start),
    _age(0),
#ifdef SHENANDOAH_CENSUS_NOISE
@ -565,6 +566,7 @@ void ShenandoahHeapRegion::recycle_internal() {
  assert(_recycling.is_set() && is_trash(), "Wrong state");
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  _mixed_candidate_garbage_words = 0;
  set_top(bottom());
  clear_live_data();
  reset_alloc_metadata();

@ -43,6 +43,7 @@ class ShenandoahHeapRegion {
  friend class VMStructs;
  friend class ShenandoahHeapRegionStateConstant;
private:

  /*
    Region state is described by a state machine. Transitions are guarded by
    heap lock, which allows changing the state of several regions atomically.
@ -259,6 +260,8 @@ private:
  volatile size_t _live_data;
  volatile size_t _critical_pins;

  size_t _mixed_candidate_garbage_words;

  HeapWord* volatile _update_watermark;

  uint _age;
@ -398,6 +401,14 @@ public:
  // above TAMS.
  inline size_t get_live_data_words() const;

  inline size_t get_mixed_candidate_live_data_bytes() const;
  inline size_t get_mixed_candidate_live_data_words() const;

  inline void capture_mixed_candidate_garbage();

  // Returns garbage by calculating the difference between used and get_live_data_words. The value returned is only
  // meaningful immediately following completion of marking. If there have been subsequent allocations in this region,
  // use a different approach to determine garbage, such as (used() - get_mixed_candidate_live_data_bytes())
  inline size_t garbage() const;

  void print_on(outputStream* st) const;

@ -163,6 +163,23 @@ inline size_t ShenandoahHeapRegion::get_live_data_bytes() const {
  return get_live_data_words() * HeapWordSize;
}

inline size_t ShenandoahHeapRegion::get_mixed_candidate_live_data_bytes() const {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(used() >= _mixed_candidate_garbage_words * HeapWordSize, "used must not be less than garbage");
  return used() - _mixed_candidate_garbage_words * HeapWordSize;
}

inline size_t ShenandoahHeapRegion::get_mixed_candidate_live_data_words() const {
  shenandoah_assert_heaplocked_or_safepoint();
  assert(used() >= _mixed_candidate_garbage_words * HeapWordSize, "used must not be less than garbage");
  return used() / HeapWordSize - _mixed_candidate_garbage_words;
}

inline void ShenandoahHeapRegion::capture_mixed_candidate_garbage() {
  shenandoah_assert_heaplocked_or_safepoint();
  _mixed_candidate_garbage_words = garbage() / HeapWordSize;
}

inline bool ShenandoahHeapRegion::has_live() const {
  return get_live_data_words() != 0;
}
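
The arithmetic behind these accessors, restated as a standalone sketch (illustrative only; the field names are placeholders, not the HotSpot ones): at mark end the region captures garbage = used - live, and later recovers live as used - capturedGarbage even after new allocations have grown used.

class RegionAccounting {
    long used;             // bytes currently allocated in the region
    long capturedGarbage;  // garbage snapshot taken at mark end

    void captureAtMarkEnd(long liveAtMarkEnd) {
        capturedGarbage = used - liveAtMarkEnd;
    }

    long mixedCandidateLiveBytes() {
        // stays valid even if 'used' has grown since the capture,
        // unlike a garbage() recomputed from stale marking data
        return used - capturedGarbage;
    }
}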
@ -66,10 +66,6 @@
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "utilities/debug.hpp"
#ifdef LINUX
#include "os_linux.hpp"
#include "osContainer_linux.hpp"
#endif

#define NO_TRANSITION(result_type, header) extern "C" { result_type JNICALL header {
#define NO_TRANSITION_END } }
@ -400,35 +396,18 @@ JVM_ENTRY_NO_ENV(jboolean, jfr_is_class_instrumented(JNIEnv* env, jclass jvm, jc
JVM_END

JVM_ENTRY_NO_ENV(jboolean, jfr_is_containerized(JNIEnv* env, jclass jvm))
#ifdef LINUX
  return OSContainer::is_containerized();
#else
  return false;
#endif
  return os::is_containerized();
JVM_END

JVM_ENTRY_NO_ENV(jlong, jfr_host_total_memory(JNIEnv* env, jclass jvm))
#ifdef LINUX
  // We want the host memory, not the container limit.
  // os::physical_memory() would return the container limit.
  return static_cast<jlong>(os::Linux::physical_memory());
#else
  return static_cast<jlong>(os::physical_memory());
#endif
  return static_cast<jlong>(os::Machine::physical_memory());
JVM_END

JVM_ENTRY_NO_ENV(jlong, jfr_host_total_swap_memory(JNIEnv* env, jclass jvm))
#ifdef LINUX
  // We want the host swap memory, not the container value.
  physical_memory_size_type host_swap = 0;
  (void)os::Linux::host_swap(host_swap); // Discard return value and treat as no swap
  return static_cast<jlong>(host_swap);
#else
  physical_memory_size_type total_swap_space = 0;
  // Return value ignored - defaulting to 0 on failure.
  (void)os::total_swap_space(total_swap_space);
  (void)os::Machine::total_swap_space(total_swap_space);
  return static_cast<jlong>(total_swap_space);
#endif
JVM_END

JVM_ENTRY_NO_ENV(void, jfr_emit_data_loss(JNIEnv* env, jclass jvm, jlong bytes))

@ -1489,8 +1489,7 @@ Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) {
  }
  Node *if_f = _gvn.transform(new IfFalseNode(iff));
  Node *frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
  Node* halt = _gvn.transform(new HaltNode(if_f, frame, "unexpected null in intrinsic"));
  C->root()->add_req(halt);
  halt(if_f, frame, "unexpected null in intrinsic");
  Node *if_t = _gvn.transform(new IfTrueNode(iff));
  set_control(if_t);
  return cast_not_null(value, do_replace_in_map);
@ -2073,6 +2072,12 @@ void GraphKit::increment_counter(Node* counter_addr) {
  store_to_memory(ctrl, counter_addr, incr, T_LONG, MemNode::unordered);
}

void GraphKit::halt(Node* ctrl, Node* frameptr, const char* reason, bool generate_code_in_product) {
  Node* halt = new HaltNode(ctrl, frameptr, reason
                            PRODUCT_ONLY(COMMA generate_code_in_product));
  halt = _gvn.transform(halt);
  root()->add_req(halt);
}

//------------------------------uncommon_trap----------------------------------
// Bail out to the interpreter in mid-method. Implemented by calling the
@ -2195,11 +2200,15 @@ Node* GraphKit::uncommon_trap(int trap_request,
  // The debug info is the only real input to this call.

  // Halt-and-catch fire here. The above call should never return!
  HaltNode* halt = new HaltNode(control(), frameptr(), "uncommon trap returned which should never happen"
                                PRODUCT_ONLY(COMMA /*reachable*/false));
  _gvn.set_type_bottom(halt);
  root()->add_req(halt);

  // We only emit code for the HaltNode in debug, which is enough for
  // verifying correctness. In product, we don't want to emit it so
  // that we can save on code space. A HaltNode often gets folded because
  // the compiler can prove that the unreachable path is dead. But we
  // cannot generally expect that for uncommon traps, which are often
  // reachable and occasionally taken.
  halt(control(), frameptr(),
       "uncommon trap returned which should never happen",
       false /* don't emit code in product */);
  stop_and_kill_map();
  return call;
}

@ -709,6 +709,8 @@ class GraphKit : public Phase {
  void increment_counter(address counter_addr); // increment a debug counter
  void increment_counter(Node* counter_addr);   // increment a debug counter

  void halt(Node* ctrl, Node* frameptr, const char* reason, bool generate_code_in_product = true);

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -327,6 +327,8 @@ public:
  // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
  static bool match_rule_supported_vector(int opcode, int vlen, BasicType bt);

  // Returns true if the platform efficiently implements the given masked vector
  // operation using predicate features, false otherwise.
  static bool match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt);

  // Determines if a vector operation needs to be partially implemented with a mask

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2024, 2025, Alibaba Group Holding Limited. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -845,7 +845,8 @@ public:
    Flag_has_swapped_edges = 1ULL << 11,
    Flag_is_scheduled = 1ULL << 12,
    Flag_is_expensive = 1ULL << 13,
    Flag_is_predicated_vector = 1ULL << 14,
    Flag_is_predicated_vector = 1ULL << 14, // Marked on a vector node that has an additional
                                            // mask input controlling the lane operations.
    Flag_for_post_loop_opts_igvn = 1ULL << 15,
    Flag_for_merge_stores_igvn = 1ULL << 16,
    Flag_is_removed_by_peephole = 1ULL << 17,

@ -1229,8 +1229,7 @@ void Parse::do_method_entry() {
    Node* not_subtype_ctrl = gen_subtype_check(receiver_obj, holder_klass);
    assert(!stopped(), "not a subtype");

    Node* halt = _gvn.transform(new HaltNode(not_subtype_ctrl, frameptr(), "failed receiver subtype check"));
    C->root()->add_req(halt);
    halt(not_subtype_ctrl, frameptr(), "failed receiver subtype check");
  }
}
#endif // ASSERT

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -147,7 +147,7 @@ public:
  static const Type* int_type_xmeet(const CT* i1, const Type* t2);

  template <class CTP>
  static CTP int_type_union(CTP t1, CTP t2) {
  static auto int_type_union(CTP t1, CTP t2) {
    using CT = std::conditional_t<std::is_pointer_v<CTP>, std::remove_pointer_t<CTP>, CTP>;
    using S = std::remove_const_t<decltype(CT::_lo)>;
    using U = std::remove_const_t<decltype(CT::_ulo)>;
@ -209,7 +209,7 @@ public:
  KnownBits<U> _bits;
  int _widen = 0; // dummy field to mimic the same field in TypeInt, useful in testing

  static TypeIntMirror make(const TypeIntPrototype<S, U>& t, int widen) {
  static TypeIntMirror make(const TypeIntPrototype<S, U>& t, int widen = 0) {
    auto canonicalized_t = t.canonicalize_constraints();
    assert(!canonicalized_t.empty(), "must not be empty");
    return TypeIntMirror{canonicalized_t._data._srange._lo, canonicalized_t._data._srange._hi,
@ -217,11 +217,15 @@ public:
                         canonicalized_t._data._bits};
  }

  TypeIntMirror meet(const TypeIntMirror& o) const {
    return TypeIntHelper::int_type_union(this, &o);
  }

  // These allow TypeIntMirror to mimic the behaviors of TypeInt* and TypeLong*, so they can be
  // passed into RangeInference methods. These are only used in testing, so they are implemented in
  // the test file.
  static TypeIntMirror make(const TypeIntMirror& t, int widen);
  const TypeIntMirror* operator->() const;
  TypeIntMirror meet(const TypeIntMirror& o) const;
  bool contains(U u) const;
  bool contains(const TypeIntMirror& o) const;
  bool operator==(const TypeIntMirror& o) const;
@ -322,7 +326,7 @@ private:
  // Infer a result given the input types of a binary operation
  template <class CTP, class Inference>
  static CTP infer_binary(CTP t1, CTP t2, Inference infer) {
    CTP res;
    TypeIntMirror<S<CTP>, U<CTP>> res;
    bool is_init = false;

    SimpleIntervalIterable<CTP> t1_simple_intervals(t1);
@ -330,10 +334,10 @@

    for (auto& st1 : t1_simple_intervals) {
      for (auto& st2 : t2_simple_intervals) {
        CTP current = infer(st1, st2);
        TypeIntMirror<S<CTP>, U<CTP>> current = infer(st1, st2);

        if (is_init) {
          res = res->meet(current)->template cast<CT<CTP>>();
          res = res.meet(current);
        } else {
          is_init = true;
          res = current;
@ -342,7 +346,22 @@
    }

    assert(is_init, "must be initialized");
    return res;
    // It is important that widen is computed on the whole result instead of during each step. This
    // is because we normalize the widen of small Type instances to 0, so computing the widen value
    // for each step and taking the union of them may return a widen value that conflicts with
    // other computations, triggering the monotonicity assert during CCP.
    //
    // For example, let us consider the operation r = x ^ y:
    // - During the first step of CCP, type(x) = {0}, type(y) = [-2, 2], w = 3.
    //   Since x is a constant that is the identity element of the xor operation, type(r) = type(y) = [-2, 2], w = 3
    // - During the second step, type(x) is widened to [0, 2], w = 0.
    //   We then compute the range for:
    //     r1 = x ^ y1, type(x) = [0, 2], w = 0, type(y1) = [0, 2], w = 0
    //     r2 = x ^ y2, type(x) = [0, 2], w = 0, type(y2) = [-2, -1], w = 0
    //   This results in type(r1) = [0, 3], w = 0, and type(r2) = [-4, -1], w = 0
    //   So the union of type(r1) and type(r2) is [-4, 3], w = 0. This widen value is smaller than
    //   that of the previous step, triggering the monotonicity assert.
    return CT<CTP>::make(res, MAX2(t1->_widen, t2->_widen));
  }

public:
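
The ranges quoted in the comment above can be confirmed by brute force; a quick standalone check (illustrative only):

class XorRangeCheck {
    public static void main(String[] args) {
        int lo = Integer.MAX_VALUE, hi = Integer.MIN_VALUE;
        for (int x = 0; x <= 2; x++) {          // type(x) = [0, 2]
            for (int y = -2; y <= 2; y++) {     // type(y) = [-2, 2]
                int r = x ^ y;
                lo = Math.min(lo, r);
                hi = Math.max(hi, r);
            }
        }
        System.out.println("[" + lo + ", " + hi + "]"); // prints [-4, 3]
    }
}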
@ -357,7 +376,7 @@ public:
      U<CTP> uhi = MIN2(st1._uhi, st2._uhi);
      U<CTP> zeros = st1._bits._zeros | st2._bits._zeros;
      U<CTP> ones = st1._bits._ones & st2._bits._ones;
      return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
      return TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}});
    });
  }

@ -372,7 +391,7 @@ public:
      U<CTP> uhi = std::numeric_limits<U<CTP>>::max();
      U<CTP> zeros = st1._bits._zeros & st2._bits._zeros;
      U<CTP> ones = st1._bits._ones | st2._bits._ones;
      return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
      return TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}});
    });
  }

@ -385,7 +404,7 @@ public:
      U<CTP> uhi = std::numeric_limits<U<CTP>>::max();
      U<CTP> zeros = (st1._bits._zeros & st2._bits._zeros) | (st1._bits._ones & st2._bits._ones);
      U<CTP> ones = (st1._bits._zeros & st2._bits._ones) | (st1._bits._ones & st2._bits._zeros);
      return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
      return TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}});
    });
  }
};

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -2443,6 +2443,12 @@ const TypeVect* TypeVect::make(BasicType elem_bt, uint length, bool is_mask) {
  return nullptr;
}

// Create a vector mask type with the given element basic type and length.
// - Returns "TypeVectMask" (PVectMask) for platforms that support the predicate
//   feature and it is implemented properly in the backend, allowing the mask to
//   be stored in a predicate/mask register.
// - Returns a normal vector type "TypeVectA ~ TypeVectZ" (NVectMask) otherwise,
//   where the vector mask is stored in a vector register.
const TypeVect* TypeVect::makemask(BasicType elem_bt, uint length) {
  if (Matcher::has_predicated_vectors() &&
      Matcher::match_rule_supported_vector_masked(Op_VectorLoadMask, length, elem_bt)) {

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -799,6 +799,9 @@ public:
  static const TypeInt* make(jint lo, jint hi, int widen);
  static const Type* make_or_top(const TypeIntPrototype<jint, juint>& t, int widen);
  static const TypeInt* make(const TypeIntPrototype<jint, juint>& t, int widen) { return make_or_top(t, widen)->is_int(); }
  static const TypeInt* make(const TypeIntMirror<jint, juint>& t, int widen) {
    return (new TypeInt(TypeIntPrototype<jint, juint>{{t._lo, t._hi}, {t._ulo, t._uhi}, t._bits}, widen, false))->hashcons()->is_int();
  }

  // Check for single integer
  bool is_con() const { return _lo == _hi; }
@ -881,6 +884,9 @@ public:
  static const TypeLong* make(jlong lo, jlong hi, int widen);
  static const Type* make_or_top(const TypeIntPrototype<jlong, julong>& t, int widen);
  static const TypeLong* make(const TypeIntPrototype<jlong, julong>& t, int widen) { return make_or_top(t, widen)->is_long(); }
  static const TypeLong* make(const TypeIntMirror<jlong, julong>& t, int widen) {
    return (new TypeLong(TypeIntPrototype<jlong, julong>{{t._lo, t._hi}, {t._ulo, t._uhi}, t._bits}, widen, false))->hashcons()->is_long();
  }

  // Check for single integer
  bool is_con() const { return _lo == _hi; }
@ -1012,7 +1018,7 @@ public:
};

//------------------------------TypeVect---------------------------------------
// Class of Vector Types
// Basic class of vector (mask) types.
class TypeVect : public Type {
  const BasicType _elem_bt; // Vector's element type
  const uint _length; // Elements in vector (power of 2)
@ -1052,6 +1058,16 @@ public:
#endif
};

// TypeVect subclasses representing vectors or vector masks with "BVectMask" or "NVectMask"
// layout (see vectornode.hpp for detailed notes on vector mask representations), mapped
// to vector registers and distinguished by vector register size:
//
// - TypeVectA: Scalable vector type (variable size, e.g., AArch64 SVE, RISC-V RVV)
// - TypeVectS: 32-bit vector type
// - TypeVectD: 64-bit vector type
// - TypeVectX: 128-bit vector type
// - TypeVectY: 256-bit vector type
// - TypeVectZ: 512-bit vector type
class TypeVectA : public TypeVect {
  friend class TypeVect;
  TypeVectA(BasicType elem_bt, uint length) : TypeVect(VectorA, elem_bt, length) {}
@ -1082,6 +1098,9 @@ class TypeVectZ : public TypeVect {
  TypeVectZ(BasicType elem_bt, uint length) : TypeVect(VectorZ, elem_bt, length) {}
};

// Class of TypeVectMask, representing vector masks with "PVectMask" layout (see
// vectornode.hpp for detailed notes on vector mask representations), mapped to
// dedicated hardware predicate/mask registers.
class TypeVectMask : public TypeVect {
public:
  friend class TypeVect;

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2020, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "ci/ciSymbols.hpp"
#include "classfile/vmSymbols.hpp"
#include "opto/library_call.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/vectornode.hpp"
#include "prims/vectorSupport.hpp"
@ -2330,6 +2331,21 @@ bool LibraryCallKit::inline_vector_convert() {
  Node* op = opd1;
  if (is_cast) {
    assert(!is_mask || num_elem_from == num_elem_to, "vector mask cast needs the same elem num");

    // Make sure the precondition of VectorCastNode::opcode holds: we can only have
    // unsigned casts for integral types (excluding long). VectorAPI code is not
    // expected to violate this at runtime, but we may compile unreachable code
    // where such impossible combinations arise.
    if (is_ucast && (!is_integral_type(elem_bt_from) || elem_bt_from == T_LONG)) {
      // Halt-and-catch fire here. This condition should never happen at runtime.
      stringStream ss;
      ss.print("impossible combination: unsigned vector cast from %s", type2name(elem_bt_from));
      halt(control(), frameptr(), ss.as_string(C->comp_arena()));
      stop_and_kill_map();
      log_if_needed("  ** impossible combination: unsigned cast from %s", type2name(elem_bt_from));
      return true;
    }

    int cast_vopc = VectorCastNode::opcode(-1, elem_bt_from, !is_ucast);

    // Make sure that vector cast is implemented to particular type/size combination if it is

(File diff suppressed because it is too large)
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -115,9 +115,6 @@
#if INCLUDE_MANAGEMENT
#include "services/finalizerService.hpp"
#endif
#ifdef LINUX
#include "osContainer_linux.hpp"
#endif

#include <errno.h>

@ -500,11 +497,9 @@ JVM_LEAF(jboolean, JVM_IsUseContainerSupport(void))
JVM_END

JVM_LEAF(jboolean, JVM_IsContainerized(void))
#ifdef LINUX
  if (OSContainer::is_containerized()) {
  if (os::is_containerized()) {
    return JNI_TRUE;
  }
#endif
  return JNI_FALSE;
JVM_END

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -129,7 +129,6 @@
#ifdef LINUX
#include "cgroupSubsystem_linux.hpp"
#include "os_linux.hpp"
#include "osContainer_linux.hpp"
#endif

#define CHECK_JNI_EXCEPTION_(env, value) \
@ -2582,14 +2581,12 @@ WB_ENTRY(jboolean, WB_CheckLibSpecifiesNoexecstack(JNIEnv* env, jobject o, jstri
WB_END

WB_ENTRY(jboolean, WB_IsContainerized(JNIEnv* env, jobject o))
  LINUX_ONLY(return OSContainer::is_containerized();)
  return false;
  return os::is_containerized();
WB_END

// Physical memory of the host machine (including containers)
WB_ENTRY(jlong, WB_HostPhysicalMemory(JNIEnv* env, jobject o))
  LINUX_ONLY(return static_cast<jlong>(os::Linux::physical_memory());)
  return static_cast<jlong>(os::physical_memory());
  return static_cast<jlong>(os::Machine::physical_memory());
WB_END

// Available memory of the host machine (container-aware)

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -81,10 +81,6 @@
#include "utilities/permitForbiddenFunctions.hpp"
#include "utilities/powerOfTwo.hpp"

#ifdef LINUX
#include "osContainer_linux.hpp"
#endif

#ifndef _WINDOWS
# include <poll.h>
#endif
@ -2205,11 +2201,14 @@ static void assert_nonempty_range(const char* addr, size_t bytes) {
}

bool os::used_memory(physical_memory_size_type& value) {
#ifdef LINUX
  if (OSContainer::is_containerized()) {
    return OSContainer::memory_usage_in_bytes(value);
  if (is_containerized()) {
    return Container::used_memory(value);
  }
#endif

  return Machine::used_memory(value);
}

bool os::Machine::used_memory(physical_memory_size_type& value) {
  physical_memory_size_type avail_mem = 0;
  // Return value ignored - defaulting to 0 on failure.
  (void)os::available_memory(avail_mem);
@ -2218,6 +2217,44 @@ bool os::used_memory(physical_memory_size_type& value) {
  return true;
}

#ifndef LINUX
bool os::is_containerized() {
  return false;
}

bool os::Container::processor_count(double& value) {
  return false;
}

bool os::Container::available_memory(physical_memory_size_type& value) {
  return false;
}

bool os::Container::used_memory(physical_memory_size_type& value) {
  return false;
}

bool os::Container::total_swap_space(physical_memory_size_type& value) {
  return false;
}

bool os::Container::free_swap_space(physical_memory_size_type& value) {
  return false;
}

bool os::Container::memory_limit(physical_memory_size_type& value) {
  return false;
}

bool os::Container::memory_soft_limit(physical_memory_size_type& value) {
  return false;
}

bool os::Container::memory_throttle_limit(physical_memory_size_type& value) {
  return false;
}
#endif

bool os::commit_memory(char* addr, size_t bytes, bool executable) {
  assert_nonempty_range(addr, bytes);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -342,6 +342,52 @@ class os: AllStatic {
  static bool is_server_class_machine();
  static size_t rss();

  // On platforms with container support (currently only Linux) we combine machine values with
  // potential container values in os:: methods, abstracting which value is actually used.
  // The os::Machine and os::Container classes and their methods are used to get machine
  // and container values (when available) separately.
  static bool is_containerized();

  // The os::Machine class reports system resource metrics from the perspective of the operating
  // system, without considering container-imposed limits. The values returned by these methods
  // reflect the resources visible to the process as reported by the OS, and may already be
  // affected by mechanisms such as virtualization, hypervisor limits, or process affinity,
  // but do NOT consider further restrictions imposed by container runtimes (e.g., cgroups)
  class Machine : AllStatic {
  public:
    static int active_processor_count();

    [[nodiscard]] static bool available_memory(physical_memory_size_type& value);
    [[nodiscard]] static bool used_memory(physical_memory_size_type& value);
    [[nodiscard]] static bool free_memory(physical_memory_size_type& value);

    [[nodiscard]] static bool total_swap_space(physical_memory_size_type& value);
    [[nodiscard]] static bool free_swap_space(physical_memory_size_type& value);

    static physical_memory_size_type physical_memory();
  };

  // The os::Container class reports resource limits as imposed by a supported container runtime
  // (currently only cgroup-based Linux runtimes). If the process is running inside a
  // containerized environment, methods from this class report the effective limits imposed
  // by the container, which may be more restrictive than what os::Machine reports.
  // Methods return true and set the out-parameter if a limit is found,
  // or false if no limit exists or it cannot be determined.
  class Container : AllStatic {
  public:
    [[nodiscard]] static bool processor_count(double& value); // Returns the core-equivalent CPU quota

    [[nodiscard]] static bool available_memory(physical_memory_size_type& value);
    [[nodiscard]] static bool used_memory(physical_memory_size_type& value);

    [[nodiscard]] static bool total_swap_space(physical_memory_size_type& value);
    [[nodiscard]] static bool free_swap_space(physical_memory_size_type& value);

    [[nodiscard]] static bool memory_limit(physical_memory_size_type& value);
    [[nodiscard]] static bool memory_soft_limit(physical_memory_size_type& value);
    [[nodiscard]] static bool memory_throttle_limit(physical_memory_size_type& value);
  };

  // Returns the id of the processor on which the calling thread is currently executing.
  // The returned value is guaranteed to be between 0 and (os::processor_count() - 1).
  static uint processor_id();
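
From the Java side the same machine-versus-container distinction surfaces through the container-aware com.sun.management.OperatingSystemMXBean; a minimal sketch (illustrative only, not part of this change):

import java.lang.management.ManagementFactory;

class MemoryProbe {
    public static void main(String[] args) {
        var osBean = (com.sun.management.OperatingSystemMXBean)
                ManagementFactory.getOperatingSystemMXBean();
        // Container-aware since JDK 14: inside a memory-limited cgroup this
        // reports the limit, otherwise the physical memory of the machine.
        System.out.println("total memory: " + osBean.getTotalMemorySize());
        System.out.println("free memory:  " + osBean.getFreeMemorySize());
    }
}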
@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -36,6 +36,7 @@
#include "memory/resourceArea.hpp"
#include "nmt/memTracker.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaThread.inline.hpp"
@ -82,7 +83,7 @@ Thread::Thread(MemTag mem_tag) {
  _threads_hazard_ptr = nullptr;
  _threads_list_ptr = nullptr;
  _nested_threads_hazard_ptr_cnt = 0;
  _rcu_counter = 0;
  _rcu_counter.store_relaxed(0);

  // the handle mark links itself to last_handle_mark
  new HandleMark(this);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
@ -30,6 +30,7 @@
#include "gc/shared/threadLocalAllocBuffer.hpp"
#include "jni.h"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
@ -238,9 +239,9 @@ class Thread: public ThreadShadow {

  // Support for GlobalCounter
 private:
  volatile uintx _rcu_counter;
  Atomic<uintx> _rcu_counter;
 public:
  volatile uintx* get_rcu_counter() {
  Atomic<uintx>* get_rcu_counter() {
    return &_rcu_counter;
  }

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,7 @@
 */

#include "memory/iterator.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vmThread.hpp"
@ -41,7 +41,7 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure {
    SpinYield yield;
    // Loops on this thread until it has exited the critical read section.
    while (true) {
      uintx cnt = AtomicAccess::load_acquire(thread->get_rcu_counter());
      uintx cnt = thread->get_rcu_counter()->load_acquire();
      // This checks if the thread's counter is active. And if so is the counter
      // for a pre-existing reader (belongs to this grace period). A pre-existing
      // reader will have a lower counter than the global counter version for this
@ -57,9 +57,9 @@ class GlobalCounter::CounterThreadCheck : public ThreadClosure {
};

void GlobalCounter::write_synchronize() {
  assert((*Thread::current()->get_rcu_counter() & COUNTER_ACTIVE) == 0x0, "must be outside a critical section");
  // AtomicAccess::add must provide fence since we have storeload dependency.
  uintx gbl_cnt = AtomicAccess::add(&_global_counter._counter, COUNTER_INCREMENT);
  assert((Thread::current()->get_rcu_counter()->load_relaxed() & COUNTER_ACTIVE) == 0x0, "must be outside a critical section");
  // Atomic add must provide fence since we have storeload dependency.
  uintx gbl_cnt = _global_counter._counter.add_then_fetch(COUNTER_INCREMENT);

  // Do all RCU threads.
  CounterThreadCheck ctc(gbl_cnt);

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@

#include "memory/allStatic.hpp"
#include "memory/padded.hpp"
#include "runtime/atomic.hpp"

class Thread;

@ -47,7 +48,7 @@ class GlobalCounter : public AllStatic {
  // counter is on a separate cacheline.
  struct PaddedCounter {
    DEFINE_PAD_MINUS_SIZE(0, DEFAULT_PADDING_SIZE, 0);
    volatile uintx _counter;
    Atomic<uintx> _counter;
    DEFINE_PAD_MINUS_SIZE(1, DEFAULT_PADDING_SIZE, sizeof(volatile uintx));
  };

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,30 +27,29 @@

#include "utilities/globalCounter.hpp"

#include "runtime/atomicAccess.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"

inline GlobalCounter::CSContext
GlobalCounter::critical_section_begin(Thread *thread) {
  assert(thread == Thread::current(), "must be current thread");
  uintx old_cnt = AtomicAccess::load(thread->get_rcu_counter());
  uintx old_cnt = thread->get_rcu_counter()->load_relaxed();
  // Retain the old counter value if already active, e.g. nested.
  // Otherwise, set the counter to the current version + active bit.
  uintx new_cnt = old_cnt;
  if ((new_cnt & COUNTER_ACTIVE) == 0) {
    new_cnt = AtomicAccess::load(&_global_counter._counter) | COUNTER_ACTIVE;
    new_cnt = _global_counter._counter.load_relaxed() | COUNTER_ACTIVE;
  }
  AtomicAccess::release_store_fence(thread->get_rcu_counter(), new_cnt);
  thread->get_rcu_counter()->release_store_fence(new_cnt);
  return static_cast<CSContext>(old_cnt);
}

inline void
GlobalCounter::critical_section_end(Thread *thread, CSContext context) {
  assert(thread == Thread::current(), "must be current thread");
  assert((*thread->get_rcu_counter() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
  assert((thread->get_rcu_counter()->load_relaxed() & COUNTER_ACTIVE) == COUNTER_ACTIVE, "must be in critical section");
  // Restore the counter value from before the associated begin.
  AtomicAccess::release_store(thread->get_rcu_counter(),
                              static_cast<uintx>(context));
  thread->get_rcu_counter()->release_store(static_cast<uintx>(context));
}

class GlobalCounter::CriticalSection {
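
The grace-period protocol these two files implement can be restated as a Java analogue (illustrative only; the real implementation is the C++ above): readers publish (globalVersion | ACTIVE) while inside a critical section, and a writer bumps the global version with a fenced read-modify-write, then waits until no pre-existing reader remains active.

import java.util.concurrent.atomic.AtomicLong;

class MiniRcu {
    static final long ACTIVE = 1L;
    static final AtomicLong global = new AtomicLong(2); // versions step by 2

    static final class Reader {
        final AtomicLong slot = new AtomicLong(); // per-thread rcu counter

        void enter() { slot.set(global.get() | ACTIVE); }
        void exit()  { slot.set(0); }
    }

    static void writeSynchronize(Iterable<Reader> readers) {
        long v = global.addAndGet(2); // fenced, like add_then_fetch
        for (Reader r : readers) {
            // spin while a pre-existing reader (active with an older
            // version) is still inside its critical section
            while (true) {
                long c = r.slot.get();
                if ((c & ACTIVE) == 0 || c >= (v | ACTIVE)) break;
                Thread.onSpinWait();
            }
        }
    }
}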
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -87,9 +87,8 @@ import java.util.function.Supplier;
 * is thrown. Hence, a lazy constant can never hold a {@code null} value. Clients who
 * want to use a nullable constant can wrap the value into an {@linkplain Optional} holder.
 * <p>
 * If the computing function recursively invokes itself (directly or indirectly via
 * the lazy constant), an {@linkplain IllegalStateException} is thrown, and the lazy
 * constant is not initialized.
 * If the computing function recursively invokes itself via the lazy constant, an
 * {@linkplain IllegalStateException} is thrown, and the lazy constant is not initialized.
 *
 * <h2 id="composition">Composing lazy constants</h2>
 * A lazy constant can depend on other lazy constants, forming a dependency graph

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -1224,7 +1224,7 @@ public interface List<E> extends SequencedCollection<E> {
 * The returned list and its {@link List#subList(int, int) subList()} or
 * {@link List#reversed()} views implement the {@link RandomAccess} interface.
 * <p>
 * If the provided computing function recursively calls itself or the returned
 * If the provided computing function recursively calls itself via the returned
 * lazy list for the same index, an {@linkplain IllegalStateException}
 * will be thrown.
 * <p>

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -1777,7 +1777,7 @@ public interface Map<K, V> {
 * The values of any {@link Map#values()} or {@link Map#entrySet()} views of
 * the returned map are also lazily computed.
 * <p>
 * If the provided computing function recursively calls itself or
 * If the provided computing function recursively calls itself via
 * the returned lazy map for the same key, an {@linkplain IllegalStateException}
 * will be thrown.
 * <p>
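
The eager collections already enforce an analogous rule, which may help frame the javadoc wording above: HashMap.computeIfAbsent rejects a mapping function that re-enters the same map, throwing ConcurrentModificationException much as the lazy list and map throw IllegalStateException. A minimal demonstration (illustrative only):

import java.util.HashMap;
import java.util.Map;

class RecursiveComputeDemo {
    static final Map<Integer, Long> FIB = new HashMap<>();

    static long fib(int n) {
        // Re-entrant computeIfAbsent: the nested calls mutate FIB while
        // the outer computeIfAbsent is still in progress.
        return FIB.computeIfAbsent(n,
                k -> k < 2 ? k : fib(k - 1) + fib(k - 2));
    }

    public static void main(String[] args) {
        fib(10); // throws ConcurrentModificationException
    }
}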
src/java.base/share/classes/sun/security/ssl/DHasKEM.java (new file, 254 lines)
@ -0,0 +1,254 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package sun.security.ssl;

import sun.security.util.ArrayUtil;
import sun.security.util.CurveDB;
import sun.security.util.ECUtil;
import sun.security.util.NamedCurve;

import javax.crypto.DecapsulateException;
import javax.crypto.KEM;
import javax.crypto.KEMSpi;
import javax.crypto.KeyAgreement;
import javax.crypto.SecretKey;
import java.io.IOException;
import java.math.BigInteger;
import java.security.*;
import java.security.interfaces.ECKey;
import java.security.interfaces.ECPublicKey;
import java.security.interfaces.XECKey;
import java.security.interfaces.XECPublicKey;
import java.security.spec.AlgorithmParameterSpec;
import java.security.spec.ECPoint;
import java.security.spec.ECPublicKeySpec;
import java.security.spec.InvalidKeySpecException;
import java.security.spec.KeySpec;
import java.security.spec.NamedParameterSpec;
import java.security.spec.XECPublicKeySpec;
import java.util.Arrays;

/**
 * The DHasKEM class presents a KEM abstraction layer over traditional
 * DH-based key exchange, which can be used for either straight
 * ECDH/XDH or TLS hybrid key exchanges.
 *
 * This class can be used alongside standard, fully post-quantum KEMs
 * when hybrid implementations are required.
 */
public class DHasKEM implements KEMSpi {

    @Override
    public EncapsulatorSpi engineNewEncapsulator(
            PublicKey publicKey, AlgorithmParameterSpec spec,
            SecureRandom secureRandom) throws InvalidKeyException {
        return new Handler(publicKey, null, secureRandom);
    }

    @Override
    public DecapsulatorSpi engineNewDecapsulator(PrivateKey privateKey,
            AlgorithmParameterSpec spec) throws InvalidKeyException {
        return new Handler(null, privateKey, null);
    }

    private static final class Handler
            implements KEMSpi.EncapsulatorSpi, KEMSpi.DecapsulatorSpi {
        private final PublicKey pkR;
        private final PrivateKey skR;
        private final SecureRandom sr;
        private final Params params;

        Handler(PublicKey pk, PrivateKey sk, SecureRandom sr)
                throws InvalidKeyException {
            this.pkR = pk;
            this.skR = sk;
            this.sr = sr;
            this.params = paramsFromKey(pk == null ? sk : pk);
        }

        @Override
        public KEM.Encapsulated engineEncapsulate(int from, int to,
                String algorithm) {
            KeyPair kpE = params.generateKeyPair(sr);
            PrivateKey skE = kpE.getPrivate();
            PublicKey pkE = kpE.getPublic();
            byte[] pkEm = params.SerializePublicKey(pkE);
            try {
                SecretKey dh = params.DH(algorithm, skE, pkR);
                return new KEM.Encapsulated(
                        sub(dh, from, to),
                        pkEm, null);
            } catch (Exception e) {
                throw new ProviderException("internal error", e);
            }
        }

        @Override
        public int engineSecretSize() {
            return params.secretLen;
        }

        @Override
        public int engineEncapsulationSize() {
            return params.publicKeyLen;
        }

        @Override
        public SecretKey engineDecapsulate(byte[] encapsulation, int from,
                int to, String algorithm) throws DecapsulateException {
            if (encapsulation.length != params.publicKeyLen) {
                throw new DecapsulateException("incorrect encapsulation size");
            }
            try {
                PublicKey pkE = params.DeserializePublicKey(encapsulation);
                SecretKey dh = params.DH(algorithm, skR, pkE);
                return sub(dh, from, to);
            } catch (IOException | InvalidKeyException e) {
                throw new DecapsulateException("Cannot decapsulate", e);
            } catch (Exception e) {
                throw new ProviderException("internal error", e);
            }
        }

        private SecretKey sub(SecretKey key, int from, int to) {
            if (from == 0 && to == params.secretLen) {
                return key;
            }

            // Key slicing should never happen; if it does, there is
            // likely a programming error.
            throw new AssertionError(
                    "Unexpected key slicing: from=" + from + ", to=" + to);
        }

        // This KEM is designed to be able to represent every supported ECDH and XDH group
        private Params paramsFromKey(Key k) throws InvalidKeyException {
            if (k instanceof ECKey eckey) {
                if (ECUtil.equals(eckey.getParams(), CurveDB.P_256)) {
                    return Params.P256;
                } else if (ECUtil.equals(eckey.getParams(), CurveDB.P_384)) {
                    return Params.P384;
                } else if (ECUtil.equals(eckey.getParams(), CurveDB.P_521)) {
                    return Params.P521;
                }
            } else if (k instanceof XECKey xkey
                    && xkey.getParams() instanceof NamedParameterSpec ns) {
                if (ns.getName().equalsIgnoreCase(
                        NamedParameterSpec.X25519.getName())) {
                    return Params.X25519;
                } else if (ns.getName().equalsIgnoreCase(
                        NamedParameterSpec.X448.getName())) {
                    return Params.X448;
                }
            }
            throw new InvalidKeyException("Unsupported key");
        }
    }

    private enum Params {

        P256(32, 2 * 32 + 1,
                "ECDH", "EC", CurveDB.P_256),

        P384(48, 2 * 48 + 1,
                "ECDH", "EC", CurveDB.P_384),

        P521(66, 2 * 66 + 1,
                "ECDH", "EC", CurveDB.P_521),

        X25519(32, 32,
                "XDH", "XDH", NamedParameterSpec.X25519),

        X448(56, 56,
                "XDH", "XDH", NamedParameterSpec.X448);

        private final int secretLen;
        private final int publicKeyLen;
        private final String kaAlgorithm;
        private final String keyAlgorithm;
        private final AlgorithmParameterSpec spec;

        Params(int secretLen, int publicKeyLen, String kaAlgorithm,
                String keyAlgorithm, AlgorithmParameterSpec spec) {
            this.spec = spec;
            this.secretLen = secretLen;
            this.publicKeyLen = publicKeyLen;
            this.kaAlgorithm = kaAlgorithm;
            this.keyAlgorithm = keyAlgorithm;
        }

        private boolean isEC() {
            return this == P256 || this == P384 || this == P521;
        }

        private KeyPair generateKeyPair(SecureRandom sr) {
            try {
                KeyPairGenerator g = KeyPairGenerator.getInstance(keyAlgorithm);
                g.initialize(spec, sr);
                return g.generateKeyPair();
            } catch (Exception e) {
                throw new ProviderException("internal error", e);
            }
        }

        private byte[] SerializePublicKey(PublicKey k) {
            if (isEC()) {
                ECPoint w = ((ECPublicKey) k).getW();
                return ECUtil.encodePoint(w, ((NamedCurve) spec).getCurve());
            } else {
                byte[] uArray = ((XECPublicKey) k).getU().toByteArray();
                ArrayUtil.reverse(uArray);
                return Arrays.copyOf(uArray, publicKeyLen);
            }
        }

        private PublicKey DeserializePublicKey(byte[] data) throws
                IOException, NoSuchAlgorithmException,
                InvalidKeySpecException {
            KeySpec keySpec;
            if (isEC()) {
                NamedCurve curve = (NamedCurve) this.spec;
                keySpec = new ECPublicKeySpec(
                        ECUtil.decodePoint(data, curve.getCurve()), curve);
            } else {
                data = data.clone();
                ArrayUtil.reverse(data);
                keySpec = new XECPublicKeySpec(
                        this.spec, new BigInteger(1, data));
            }
            return KeyFactory.getInstance(keyAlgorithm).
                    generatePublic(keySpec);
        }

        private SecretKey DH(String alg, PrivateKey skE, PublicKey pkR)
                throws NoSuchAlgorithmException, InvalidKeyException {
            KeyAgreement ka = KeyAgreement.getInstance(kaAlgorithm);
            ka.init(skE);
            ka.doPhase(pkR, true);
            return ka.generateSecret(alg);
        }
    }
}
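
For orientation, the standard javax.crypto.KEM flow (JEP 452) that a KEMSpi implementation such as DHasKEM plugs into looks as follows. This sketch uses the JDK's built-in "DHKEM" algorithm purely for illustration; the SunJSSE-internal wiring above registers its own provider entries:

import javax.crypto.KEM;
import javax.crypto.SecretKey;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.spec.NamedParameterSpec;

class KemFlow {
    public static void main(String[] args) throws Exception {
        KeyPairGenerator kpg = KeyPairGenerator.getInstance("XDH");
        kpg.initialize(NamedParameterSpec.X25519);
        KeyPair receiver = kpg.generateKeyPair();

        // Sender: derive a shared secret plus an encapsulation to transmit.
        KEM kem = KEM.getInstance("DHKEM");
        KEM.Encapsulator enc = kem.newEncapsulator(receiver.getPublic());
        KEM.Encapsulated encap = enc.encapsulate();
        SecretKey senderSecret = encap.key();

        // Receiver: recover the same secret from the encapsulation bytes.
        KEM.Decapsulator dec = kem.newDecapsulator(receiver.getPrivate());
        SecretKey receiverSecret = dec.decapsulate(encap.encapsulation());
    }
}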
src/java.base/share/classes/sun/security/ssl/Hybrid.java (new file, 474 lines)
@ -0,0 +1,474 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package sun.security.ssl;

import sun.security.util.ArrayUtil;
import sun.security.util.CurveDB;
import sun.security.util.ECUtil;
import sun.security.util.RawKeySpec;
import sun.security.x509.X509Key;

import javax.crypto.DecapsulateException;
import javax.crypto.KEM;
import javax.crypto.KEMSpi;
import javax.crypto.SecretKey;
import java.math.BigInteger;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.Key;
import java.security.KeyFactory;
import java.security.KeyFactorySpi;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.KeyPairGeneratorSpi;
import java.security.NoSuchAlgorithmException;
import java.security.PrivateKey;
import java.security.ProviderException;
import java.security.PublicKey;
import java.security.SecureRandom;
import java.security.spec.*;
import java.util.Arrays;
import java.util.Locale;

// The Hybrid class wraps two underlying algorithms (left and right sides)
// in a single TLS hybrid named group.
// It implements:
//   - Hybrid KeyPair generation
//   - Hybrid KeyFactory for decoding concatenated hybrid public keys
//   - Hybrid KEM implementation for performing encapsulation and
//     decapsulation over two underlying algorithms (traditional
//     algorithm and post-quantum KEM algorithm)

public class Hybrid {

    public static final NamedParameterSpec X25519_MLKEM768 =
            new NamedParameterSpec("X25519MLKEM768");

    public static final NamedParameterSpec SECP256R1_MLKEM768 =
            new NamedParameterSpec("SecP256r1MLKEM768");

    public static final NamedParameterSpec SECP384R1_MLKEM1024 =
            new NamedParameterSpec("SecP384r1MLKEM1024");

    private static AlgorithmParameterSpec getSpec(String name) {
        if (name.startsWith("secp")) {
            return new ECGenParameterSpec(name);
        } else {
            return new NamedParameterSpec(name);
        }
    }

    private static KeyPairGenerator getKeyPairGenerator(String name) throws
            NoSuchAlgorithmException {
        if (name.startsWith("secp")) {
            name = "EC";
        }
        return KeyPairGenerator.getInstance(name);
    }

    private static KeyFactory getKeyFactory(String name) throws
            NoSuchAlgorithmException {
        if (name.startsWith("secp")) {
            name = "EC";
        }
        return KeyFactory.getInstance(name);
    }

    /**
     * Returns a KEM instance for each side of the hybrid algorithm.
     * For traditional key exchange algorithms, we use the DH-based KEM
     * implementation provided by the DHasKEM class.
     * For ML-KEM post-quantum algorithms, we obtain a KEM instance
     * with "ML-KEM". This is done to work with 3rd-party providers that
     * only provide the "ML-KEM" KEM algorithm.
     */
    private static KEM getKEM(String name) throws NoSuchAlgorithmException {
        if (name.startsWith("secp") || name.equals("X25519")) {
            return KEM.getInstance("DH", HybridProvider.PROVIDER);
        } else {
            return KEM.getInstance("ML-KEM");
        }
    }

    public static class KeyPairGeneratorImpl extends KeyPairGeneratorSpi {
        private final KeyPairGenerator left;
        private final KeyPairGenerator right;
        private final AlgorithmParameterSpec leftSpec;
        private final AlgorithmParameterSpec rightSpec;

        public KeyPairGeneratorImpl(String leftAlg, String rightAlg)
                throws NoSuchAlgorithmException {
            left = getKeyPairGenerator(leftAlg);
            right = getKeyPairGenerator(rightAlg);
            leftSpec = getSpec(leftAlg);
            rightSpec = getSpec(rightAlg);
        }

        @Override
        public void initialize(AlgorithmParameterSpec params,
                SecureRandom random)
                throws InvalidAlgorithmParameterException {
            left.initialize(leftSpec, random);
            right.initialize(rightSpec, random);
        }

        @Override
        public void initialize(int keysize, SecureRandom random) {
            // NO-OP (do nothing)
        }

        @Override
        public KeyPair generateKeyPair() {
            var kp1 = left.generateKeyPair();
            var kp2 = right.generateKeyPair();
            return new KeyPair(
                    new PublicKeyImpl("Hybrid", kp1.getPublic(),
                            kp2.getPublic()),
                    new PrivateKeyImpl("Hybrid", kp1.getPrivate(),
                            kp2.getPrivate()));
        }
    }

    public static class KeyFactoryImpl extends KeyFactorySpi {
        private final KeyFactory left;
        private final KeyFactory right;
        private final int leftlen;
        private final String leftname;
        private final String rightname;

        public KeyFactoryImpl(String left, String right)
                throws NoSuchAlgorithmException {
            this.left = getKeyFactory(left);
            this.right = getKeyFactory(right);
            this.leftlen = leftPublicLength(left);
            this.leftname = left;
            this.rightname = right;
        }

        @Override
        protected PublicKey engineGeneratePublic(KeySpec keySpec)
                throws InvalidKeySpecException {
            if (keySpec == null) {
                throw new InvalidKeySpecException("keySpec must not be null");
            }

            if (keySpec instanceof RawKeySpec rks) {
                byte[] key = rks.getKeyArr();
                if (key == null) {
                    throw new InvalidKeySpecException(
                            "RawKeySpec contains null key data");
                }
                if (key.length <= leftlen) {
                    throw new InvalidKeySpecException(
                            "Hybrid key length " + key.length +
                            " is too short; its left key length is " +
                            leftlen);
                }

                byte[] leftKeyBytes = Arrays.copyOfRange(key, 0, leftlen);
                byte[] rightKeyBytes = Arrays.copyOfRange(key, leftlen,
                        key.length);
                PublicKey leftKey, rightKey;

                try {
                    if (leftname.startsWith("secp")) {
                        var curve = CurveDB.lookup(leftname);
                        var ecSpec = new ECPublicKeySpec(
                                ECUtil.decodePoint(leftKeyBytes,
                                        curve.getCurve()), curve);
                        leftKey = left.generatePublic(ecSpec);
                    } else if (leftname.startsWith("ML-KEM")) {
                        leftKey = left.generatePublic(new RawKeySpec(
                                leftKeyBytes));
                    } else {
                        throw new InvalidKeySpecException("Unsupported left" +
                                " algorithm: " + leftname);
                    }

                    if (rightname.equals("X25519")) {
                        ArrayUtil.reverse(rightKeyBytes);
                        var xecSpec = new XECPublicKeySpec(
                                new NamedParameterSpec(rightname),
                                new BigInteger(1, rightKeyBytes));
                        rightKey = right.generatePublic(xecSpec);
                    } else if (rightname.startsWith("ML-KEM")) {
                        rightKey = right.generatePublic(new RawKeySpec(
                                rightKeyBytes));
                    } else {
                        throw new InvalidKeySpecException("Unsupported right" +
                                " algorithm: " + rightname);
                    }

                    return new PublicKeyImpl("Hybrid", leftKey, rightKey);
                } catch (Exception e) {
                    throw new InvalidKeySpecException("Failed to decode " +
                            "hybrid key", e);
                }
            }

            throw new InvalidKeySpecException(
                    "KeySpec type: " +
                    keySpec.getClass().getName() + " not supported");
        }

        private static int leftPublicLength(String name) {
            return switch (name.toLowerCase(Locale.ROOT)) {
                case "secp256r1" -> 65;
                case "secp384r1" -> 97;
                case "ml-kem-768" -> 1184;
                default -> throw new IllegalArgumentException(
                        "Unknown named group: " + name);
            };
        }

        @Override
        protected PrivateKey engineGeneratePrivate(KeySpec keySpec) throws
                InvalidKeySpecException {
            throw new UnsupportedOperationException();
        }

        @Override
        protected <T extends KeySpec> T engineGetKeySpec(Key key,
                Class<T> keySpec) throws InvalidKeySpecException {
            throw new UnsupportedOperationException();
        }

        @Override
        protected Key engineTranslateKey(Key key) throws InvalidKeyException {
            throw new UnsupportedOperationException();
        }
    }

    public static class KEMImpl implements KEMSpi {
        private final KEM left;
        private final KEM right;
||||
public KEMImpl(String left, String right)
|
||||
throws NoSuchAlgorithmException {
|
||||
this.left = getKEM(left);
|
||||
this.right = getKEM(right);
|
||||
}
|
||||
|
||||
@Override
|
||||
public EncapsulatorSpi engineNewEncapsulator(PublicKey publicKey,
|
||||
AlgorithmParameterSpec spec, SecureRandom secureRandom) throws
|
||||
InvalidAlgorithmParameterException, InvalidKeyException {
|
||||
if (publicKey instanceof PublicKeyImpl pk) {
|
||||
return new Handler(left.newEncapsulator(pk.left, secureRandom),
|
||||
right.newEncapsulator(pk.right, secureRandom),
|
||||
null, null);
|
||||
}
|
||||
throw new InvalidKeyException();
|
||||
}
|
||||
|
||||
@Override
|
||||
public DecapsulatorSpi engineNewDecapsulator(PrivateKey privateKey,
|
||||
AlgorithmParameterSpec spec)
|
||||
throws InvalidAlgorithmParameterException, InvalidKeyException {
|
||||
if (privateKey instanceof PrivateKeyImpl pk) {
|
||||
return new Handler(null, null, left.newDecapsulator(pk.left),
|
||||
right.newDecapsulator(pk.right));
|
||||
}
|
||||
throw new InvalidKeyException();
|
||||
}
|
||||
}
|
||||
|
||||
private static byte[] concat(byte[]... inputs) {
|
||||
int outLen = 0;
|
||||
for (byte[] in : inputs) {
|
||||
outLen += in.length;
|
||||
}
|
||||
byte[] out = new byte[outLen];
|
||||
int pos = 0;
|
||||
for (byte[] in : inputs) {
|
||||
System.arraycopy(in, 0, out, pos, in.length);
|
||||
pos += in.length;
|
||||
}
|
||||
return out;
|
||||
}
|
||||
|
||||
private record Handler(KEM.Encapsulator le, KEM.Encapsulator re,
|
||||
KEM.Decapsulator ld, KEM.Decapsulator rd)
|
||||
implements KEMSpi.EncapsulatorSpi, KEMSpi.DecapsulatorSpi {
|
||||
@Override
|
||||
public KEM.Encapsulated engineEncapsulate(int from, int to,
|
||||
String algorithm) {
|
||||
int expectedSecretSize = engineSecretSize();
|
||||
if (!(from == 0 && to == expectedSecretSize)) {
|
||||
throw new IllegalArgumentException(
|
||||
"Invalid range for encapsulation: from = " + from +
|
||||
" to = " + to + ", expected total secret size = " +
|
||||
expectedSecretSize);
|
||||
}
|
||||
|
||||
var left = le.encapsulate();
|
||||
var right = re.encapsulate();
|
||||
return new KEM.Encapsulated(
|
||||
new SecretKeyImpl(left.key(), right.key()),
|
||||
concat(left.encapsulation(), right.encapsulation()),
|
||||
null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int engineSecretSize() {
|
||||
if (le != null) {
|
||||
return le.secretSize() + re.secretSize();
|
||||
} else {
|
||||
return ld.secretSize() + rd.secretSize();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int engineEncapsulationSize() {
|
||||
if (le != null) {
|
||||
return le.encapsulationSize() + re.encapsulationSize();
|
||||
} else {
|
||||
return ld.encapsulationSize() + rd.encapsulationSize();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public SecretKey engineDecapsulate(byte[] encapsulation, int from,
|
||||
int to, String algorithm) throws DecapsulateException {
|
||||
int expectedEncSize = engineEncapsulationSize();
|
||||
if (encapsulation.length != expectedEncSize) {
|
||||
throw new IllegalArgumentException(
|
||||
"Invalid key encapsulation message length: " +
|
||||
encapsulation.length +
|
||||
", expected = " + expectedEncSize);
|
||||
}
|
||||
|
||||
int expectedSecretSize = engineSecretSize();
|
||||
if (!(from == 0 && to == expectedSecretSize)) {
|
||||
throw new IllegalArgumentException(
|
||||
"Invalid range for decapsulation: from = " + from +
|
||||
" to = " + to + ", expected total secret size = " +
|
||||
expectedSecretSize);
|
||||
}
|
||||
|
||||
var left = Arrays.copyOf(encapsulation, ld.encapsulationSize());
|
||||
var right = Arrays.copyOfRange(encapsulation,
|
||||
ld.encapsulationSize(), encapsulation.length);
|
||||
return new SecretKeyImpl(
|
||||
ld.decapsulate(left),
|
||||
rd.decapsulate(right)
|
||||
);
|
||||
}
|
||||
}
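
    // Illustrative sizing note for X25519MLKEM768 (left = ML-KEM-768,
    // right = X25519, matching the provider registration below); the
    // byte counts come from FIPS 203 and RFC 7748:
    //   encapsulation = 1088 (ML-KEM-768 ciphertext) + 32 (X25519 key)
    //                 = 1120 bytes  -> engineEncapsulationSize()
    //   shared secret = 32 + 32 = 64 bytes -> engineSecretSize()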

    // Package-private
    record SecretKeyImpl(SecretKey k1, SecretKey k2)
            implements SecretKey {
        @Override
        public String getAlgorithm() {
            return "Generic";
        }

        @Override
        public String getFormat() {
            return null;
        }

        @Override
        public byte[] getEncoded() {
            return null;
        }
    }

    /**
     * Hybrid public key combining two underlying public keys (left and
     * right). Public keys can be transmitted/encoded because the hybrid
     * protocol requires the public component to be sent.
     */
    // Package-private
    record PublicKeyImpl(String algorithm, PublicKey left,
            PublicKey right) implements PublicKey {
        @Override
        public String getAlgorithm() {
            return algorithm;
        }

        // getFormat() returns "RAW" because a hybrid key uses the raw
        // concatenation of the underlying encodings.
        @Override
        public String getFormat() {
            return "RAW";
        }

        // getEncoded() returns the concatenation of the encoded bytes of
        // the left and right public keys.
        @Override
        public byte[] getEncoded() {
            return concat(onlyKey(left), onlyKey(right));
        }

        static byte[] onlyKey(PublicKey key) {
            if (key instanceof X509Key xk) {
                return xk.getKeyAsBytes();
            }

            // Fallback for third-party providers
            if (!"X.509".equalsIgnoreCase(key.getFormat())) {
                throw new ProviderException("Invalid public key encoding " +
                        "format");
            }
            var xk = new X509Key();
            try {
                xk.decode(key.getEncoded());
            } catch (InvalidKeyException e) {
                throw new ProviderException("Invalid public key encoding", e);
            }
            return xk.getKeyAsBytes();
        }
    }

    /**
     * Hybrid private key combining two underlying private keys (left and
     * right). It is for internal use only; the private keys should never
     * be exported.
     */
    private record PrivateKeyImpl(String algorithm, PrivateKey left,
            PrivateKey right) implements PrivateKey {

        @Override
        public String getAlgorithm() {
            return algorithm;
        }

        // getFormat() returns null because there is no standard
        // format for a hybrid private key.
        @Override
        public String getFormat() {
            return null;
        }

        // getEncoded() returns null because there is no standard
        // encoding format for a hybrid private key.
        @Override
        public byte[] getEncoded() {
            return null;
        }
    }
}
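
A minimal round-trip sketch of how the class above can be driven through the internal provider defined next (illustrative only; these are JDK-internal APIs, exception handling is omitted, and the initialize() spec argument is ignored by the implementation):

    KeyPairGenerator kpg = KeyPairGenerator.getInstance(
            "X25519MLKEM768", HybridProvider.PROVIDER);
    kpg.initialize(Hybrid.X25519_MLKEM768, new SecureRandom());
    KeyPair kp = kpg.generateKeyPair();

    KEM kem = KEM.getInstance("X25519MLKEM768", HybridProvider.PROVIDER);
    // Sender side: encapsulate against the hybrid public key.
    KEM.Encapsulated enc = kem.newEncapsulator(kp.getPublic()).encapsulate();
    // Receiver side: decapsulate the concatenated encapsulation message.
    SecretKey secret = kem.newDecapsulator(kp.getPrivate())
            .decapsulate(enc.encapsulation());
    // enc.key() and secret now wrap the same left/right secret pair.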

src/java.base/share/classes/sun/security/ssl/HybridProvider.java (new file, 130 lines)
@@ -0,0 +1,130 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package sun.security.ssl;

import java.security.Provider;
import java.security.NoSuchAlgorithmException;
import java.util.List;
import java.util.Map;

import static sun.security.util.SecurityConstants.PROVIDER_VER;

// This is an internal provider used in the JSSE code for DH-as-KEM
// and Hybrid KEM support. It doesn't actually get installed in the
// system's list of security providers that is searched at runtime;
// JSSE loads this provider internally.
// It registers Hybrid KeyPairGenerator, KeyFactory, and KEM
// implementations for hybrid named groups as Provider services.

public class HybridProvider {

    public static final Provider PROVIDER = new ProviderImpl();

    private static final class ProviderImpl extends Provider {
        @java.io.Serial
        private static final long serialVersionUID = 0L;

        ProviderImpl() {
            super("HybridAndDHAsKEM", PROVIDER_VER,
                    "Hybrid and DHAsKEM provider");
            put("KEM.DH", DHasKEM.class.getName());

            // Hybrid KeyPairGenerator/KeyFactory/KEM

            // The order of shares in the concatenation for group name
            // X25519MLKEM768 has been reversed as per the current
            // draft RFC.
            var attrs = Map.of("name", "X25519MLKEM768", "left", "ML-KEM-768",
                    "right", "X25519");
            putService(new HybridService(this, "KeyPairGenerator",
                    "X25519MLKEM768",
                    "sun.security.ssl.Hybrid$KeyPairGeneratorImpl",
                    null, attrs));
            putService(new HybridService(this, "KEM",
                    "X25519MLKEM768",
                    "sun.security.ssl.Hybrid$KEMImpl",
                    null, attrs));
            putService(new HybridService(this, "KeyFactory",
                    "X25519MLKEM768",
                    "sun.security.ssl.Hybrid$KeyFactoryImpl",
                    null, attrs));

            attrs = Map.of("name", "SecP256r1MLKEM768", "left", "secp256r1",
                    "right", "ML-KEM-768");
            putService(new HybridService(this, "KeyPairGenerator",
                    "SecP256r1MLKEM768",
                    "sun.security.ssl.Hybrid$KeyPairGeneratorImpl",
                    null, attrs));
            putService(new HybridService(this, "KEM",
                    "SecP256r1MLKEM768",
                    "sun.security.ssl.Hybrid$KEMImpl",
                    null, attrs));
            putService(new HybridService(this, "KeyFactory",
                    "SecP256r1MLKEM768",
                    "sun.security.ssl.Hybrid$KeyFactoryImpl",
                    null, attrs));

            attrs = Map.of("name", "SecP384r1MLKEM1024", "left", "secp384r1",
                    "right", "ML-KEM-1024");
            putService(new HybridService(this, "KeyPairGenerator",
                    "SecP384r1MLKEM1024",
                    "sun.security.ssl.Hybrid$KeyPairGeneratorImpl",
                    null, attrs));
            putService(new HybridService(this, "KEM",
                    "SecP384r1MLKEM1024",
                    "sun.security.ssl.Hybrid$KEMImpl",
                    null, attrs));
            putService(new HybridService(this, "KeyFactory",
                    "SecP384r1MLKEM1024",
                    "sun.security.ssl.Hybrid$KeyFactoryImpl",
                    null, attrs));
        }
    }

    private static class HybridService extends Provider.Service {

        HybridService(Provider p, String type, String algo, String cn,
                List<String> aliases, Map<String, String> attrs) {
            super(p, type, algo, cn, aliases, attrs);
        }

        @Override
        public Object newInstance(Object ctrParamObj)
                throws NoSuchAlgorithmException {
            String type = getType();
            return switch (type) {
                case "KeyPairGenerator" -> new Hybrid.KeyPairGeneratorImpl(
                        getAttribute("left"), getAttribute("right"));
                case "KeyFactory" -> new Hybrid.KeyFactoryImpl(
                        getAttribute("left"), getAttribute("right"));
                case "KEM" -> new Hybrid.KEMImpl(
                        getAttribute("left"), getAttribute("right"));
                default -> throw new NoSuchAlgorithmException(
                        "Unexpected value: " + type);
            };
        }
    }
}
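
The NamedGroup code resolves hybrid algorithms against these registrations; a sketch of the lookup path (illustrative only):

    Provider.Service s =
            HybridProvider.PROVIDER.getService("KEM", "X25519MLKEM768");
    // newInstance(null) dispatches on the service type and passes the
    // registered "left"/"right" attributes ("ML-KEM-768", "X25519")
    // to the matching Hybrid constructor.
    KEMSpi spi = (KEMSpi) s.newInstance(null);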

@@ -24,7 +24,10 @@
 */
package sun.security.ssl;

import sun.security.util.RawKeySpec;

import javax.crypto.KDF;
import javax.crypto.KEM;
import javax.crypto.KeyAgreement;
import javax.crypto.SecretKey;
import javax.crypto.spec.HKDFParameterSpec;
@@ -32,9 +35,11 @@ import javax.net.ssl.SSLHandshakeException;

import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.KeyFactory;
import java.security.PrivateKey;
import java.security.Provider;
import java.security.PublicKey;
import java.security.spec.AlgorithmParameterSpec;
import java.security.SecureRandom;
import sun.security.util.KeyUtil;

/**
@@ -46,15 +51,32 @@ public class KAKeyDerivation implements SSLKeyDerivation {

    private final HandshakeContext context;
    private final PrivateKey localPrivateKey;
    private final PublicKey peerPublicKey;
    private final byte[] keyshare;
    private final Provider provider;

    // Constructor called by Key Agreement
    KAKeyDerivation(String algorithmName,
            HandshakeContext context,
            PrivateKey localPrivateKey,
            PublicKey peerPublicKey) {
        this(algorithmName, null, context, localPrivateKey,
                peerPublicKey, null);
    }

    // Constructor called by KEM: stores the client's public key or the
    // encapsulated message in keyshare.
    KAKeyDerivation(String algorithmName,
            NamedGroup namedGroup,
            HandshakeContext context,
            PrivateKey localPrivateKey,
            PublicKey peerPublicKey,
            byte[] keyshare) {
        this.algorithmName = algorithmName;
        this.context = context;
        this.localPrivateKey = localPrivateKey;
        this.peerPublicKey = peerPublicKey;
        this.keyshare = keyshare;
        this.provider = (namedGroup != null) ? namedGroup.getProvider() : null;
    }

    @Override

@@ -94,22 +116,15 @@ public class KAKeyDerivation implements SSLKeyDerivation {
        }
    }

-   /**
-    * Handle the TLSv1.3 objects, which use the HKDF algorithms.
-    */
-   private SecretKey t13DeriveKey(String type)
-           throws IOException {
-       SecretKey sharedSecret = null;
+   private SecretKey deriveHandshakeSecret(String label,
+           SecretKey sharedSecret)
+           throws GeneralSecurityException, IOException {
        SecretKey earlySecret = null;
        SecretKey saltSecret = null;
-       try {
-           KeyAgreement ka = KeyAgreement.getInstance(algorithmName);
-           ka.init(localPrivateKey);
-           ka.doPhase(peerPublicKey, true);
-           sharedSecret = ka.generateSecret("TlsPremasterSecret");

        CipherSuite.HashAlg hashAlg = context.negotiatedCipherSuite.hashAlg;
        SSLKeyDerivation kd = context.handshakeKeyDerivation;
+       try {
            if (kd == null) { // No PSK is in use.
                // If PSK is not in use, Early Secret will still be
                // HKDF-Extract(0, 0).
@@ -129,12 +144,90 @@
                // the handshake secret key derivation (below) as it may not
                // work with the "sharedSecret" obj.
                KDF hkdf = KDF.getInstance(hashAlg.hkdfAlgorithm);
-               return hkdf.deriveKey(type, HKDFParameterSpec.ofExtract()
-                       .addSalt(saltSecret).addIKM(sharedSecret).extractOnly());
+               var spec = HKDFParameterSpec.ofExtract().addSalt(saltSecret);
+               if (sharedSecret instanceof Hybrid.SecretKeyImpl hsk) {
+                   spec = spec.addIKM(hsk.k1()).addIKM(hsk.k2());
+               } else {
+                   spec = spec.addIKM(sharedSecret);
+               }
+
+               return hkdf.deriveKey(label, spec.extractOnly());
        } finally {
            KeyUtil.destroySecretKeys(earlySecret, saltSecret);
        }
    }
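
    // Note: successive addIKM() calls concatenate, so for a hybrid share
    // the extract step above is equivalent to
    //   HandshakeSecret = HKDF-Extract(saltSecret, k1 || k2)
    // with k1 the left (e.g. ML-KEM) secret and k2 the right secret. The
    // components must be fed individually because
    // Hybrid.SecretKeyImpl.getEncoded() deliberately returns null.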

    /**
     * This method is called by the server to perform KEM encapsulation.
     * It uses the client's public key (sent by the client as a keyshare)
     * to encapsulate a shared secret and returns the encapsulated message.
     *
     * Package-private, used from KeyShareExtension.SHKeyShareProducer::
     * produce().
     */
    KEM.Encapsulated encapsulate(String algorithm, SecureRandom random)
            throws IOException {
        SecretKey sharedSecret = null;

        if (keyshare == null) {
            throw new IOException("No keyshare available for KEM " +
                    "encapsulation");
        }

        try {
            KeyFactory kf = (provider != null) ?
                    KeyFactory.getInstance(algorithmName, provider) :
                    KeyFactory.getInstance(algorithmName);
            var pk = kf.generatePublic(new RawKeySpec(keyshare));

            KEM kem = (provider != null) ?
                    KEM.getInstance(algorithmName, provider) :
                    KEM.getInstance(algorithmName);
            KEM.Encapsulator e = kem.newEncapsulator(pk, random);
            KEM.Encapsulated enc = e.encapsulate();
            sharedSecret = enc.key();

            SecretKey derived = deriveHandshakeSecret(algorithm, sharedSecret);

            return new KEM.Encapsulated(derived, enc.encapsulation(), null);
        } catch (GeneralSecurityException gse) {
            throw new SSLHandshakeException("Could not generate secret", gse);
        } finally {
-           KeyUtil.destroySecretKeys(sharedSecret, earlySecret, saltSecret);
+           KeyUtil.destroySecretKeys(sharedSecret);
        }
    }

    /**
     * Handle the TLSv1.3 objects, which use the HKDF algorithms.
     */
    private SecretKey t13DeriveKey(String type)
            throws IOException {
        SecretKey sharedSecret = null;

        try {
            if (keyshare != null) {
                // Using KEM: called by the client after receiving the KEM
                // ciphertext (keyshare) from the server in ServerHello.
                // The client decapsulates it using its private key.
                KEM kem = (provider != null)
                        ? KEM.getInstance(algorithmName, provider)
                        : KEM.getInstance(algorithmName);
                var decapsulator = kem.newDecapsulator(localPrivateKey);
                sharedSecret = decapsulator.decapsulate(
                        keyshare, 0, decapsulator.secretSize(),
                        "TlsPremasterSecret");
            } else {
                // Using traditional DH-style Key Agreement
                KeyAgreement ka = KeyAgreement.getInstance(algorithmName);
                ka.init(localPrivateKey);
                ka.doPhase(peerPublicKey, true);
                sharedSecret = ka.generateSecret("TlsPremasterSecret");
            }

            return deriveHandshakeSecret(type, sharedSecret);
        } catch (GeneralSecurityException gse) {
            throw new SSLHandshakeException("Could not generate secret", gse);
        } finally {
            KeyUtil.destroySecretKeys(sharedSecret);
        }
    }
}

src/java.base/share/classes/sun/security/ssl/KEMKeyExchange.java (new file, 223 lines)
@@ -0,0 +1,223 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package sun.security.ssl;

import java.io.IOException;
import java.security.GeneralSecurityException;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.PrivateKey;
import java.security.Provider;
import java.security.ProviderException;
import java.security.PublicKey;
import java.security.SecureRandom;
import java.security.spec.NamedParameterSpec;
import javax.crypto.SecretKey;

import sun.security.ssl.NamedGroup.NamedGroupSpec;
import sun.security.x509.X509Key;

/**
 * Specifics for single or hybrid key exchanges based on KEM.
 */
final class KEMKeyExchange {

    static final SSLKeyAgreementGenerator kemKAGenerator
            = new KEMKAGenerator();

    static final class KEMCredentials implements NamedGroupCredentials {

        final NamedGroup namedGroup;
        // Unlike other credentials, we store the raw key share value
        // directly; there is no need to convert it to a key.
        private final byte[] keyshare;

        KEMCredentials(byte[] keyshare, NamedGroup namedGroup) {
            this.keyshare = keyshare;
            this.namedGroup = namedGroup;
        }

        // For KEM, the server performs encapsulation and the resulting
        // encapsulated message becomes the key_share value sent to
        // the client. It is not a public key, so there is no PublicKey
        // object to return.
        @Override
        public PublicKey getPublicKey() {
            throw new UnsupportedOperationException(
                    "KEMCredentials stores a raw keyshare, not a PublicKey");
        }

        public byte[] getKeyShare() {
            return keyshare;
        }

        @Override
        public NamedGroup getNamedGroup() {
            return namedGroup;
        }

        /**
         * Instantiates a KEMCredentials object.
         */
        static KEMCredentials valueOf(NamedGroup namedGroup,
                byte[] encodedPoint) {

            if (namedGroup.spec != NamedGroupSpec.NAMED_GROUP_KEM) {
                throw new RuntimeException(
                        "Credentials decoding: not a KEM named group");
            }

            if (encodedPoint == null || encodedPoint.length == 0) {
                return null;
            }

            return new KEMCredentials(encodedPoint, namedGroup);
        }
    }

    private static class KEMPossession implements SSLPossession {
        private final NamedGroup namedGroup;

        public KEMPossession(NamedGroup ng) {
            this.namedGroup = ng;
        }

        public NamedGroup getNamedGroup() {
            return namedGroup;
        }
    }

    static final class KEMReceiverPossession extends KEMPossession {

        private final PrivateKey privateKey;
        private final PublicKey publicKey;

        KEMReceiverPossession(NamedGroup namedGroup, SecureRandom random) {
            super(namedGroup);
            String algName = null;
            try {
                // For KEM: this receiver side (the client) generates a
                // key pair.
                algName = ((NamedParameterSpec) namedGroup.keAlgParamSpec).
                        getName();
                Provider provider = namedGroup.getProvider();
                KeyPairGenerator kpg = (provider != null) ?
                        KeyPairGenerator.getInstance(algName, provider) :
                        KeyPairGenerator.getInstance(algName);

                kpg.initialize(namedGroup.keAlgParamSpec, random);
                KeyPair kp = kpg.generateKeyPair();
                privateKey = kp.getPrivate();
                publicKey = kp.getPublic();
            } catch (GeneralSecurityException e) {
                throw new RuntimeException(
                        "Could not generate keypair for algorithm: " +
                        algName, e);
            }
        }

        @Override
        public byte[] encode() {
            if (publicKey instanceof X509Key xk) {
                return xk.getKeyAsBytes();
            } else if (publicKey instanceof Hybrid.PublicKeyImpl hk) {
                return hk.getEncoded();
            }
            throw new ProviderException("Unsupported key type: " + publicKey);
        }

        // Package-private
        PublicKey getPublicKey() {
            return publicKey;
        }

        // Package-private
        PrivateKey getPrivateKey() {
            return privateKey;
        }
    }

    static final class KEMSenderPossession extends KEMPossession {

        private SecretKey key;
        private final SecureRandom random;

        KEMSenderPossession(NamedGroup namedGroup, SecureRandom random) {
            super(namedGroup);
            this.random = random;
        }

        // Package-private
        SecureRandom getRandom() {
            return random;
        }

        // Package-private
        SecretKey getKey() {
            return key;
        }

        // Package-private
        void setKey(SecretKey key) {
            this.key = key;
        }

        @Override
        public byte[] encode() {
            throw new UnsupportedOperationException("encode() not supported");
        }
    }

    private static final class KEMKAGenerator
            implements SSLKeyAgreementGenerator {

        // Prevent instantiation of this class.
        private KEMKAGenerator() {
            // blank
        }

        @Override
        public SSLKeyDerivation createKeyDerivation(
                HandshakeContext context) throws IOException {
            for (SSLPossession poss : context.handshakePossessions) {
                if (poss instanceof KEMReceiverPossession kposs) {
                    NamedGroup ng = kposs.getNamedGroup();
                    for (SSLCredentials cred : context.handshakeCredentials) {
                        if (cred instanceof KEMCredentials kcred &&
                                ng.equals(kcred.namedGroup)) {
                            String name = ((NamedParameterSpec)
                                    ng.keAlgParamSpec).getName();
                            return new KAKeyDerivation(name, ng, context,
                                    kposs.getPrivateKey(), null,
                                    kcred.getKeyShare());
                        }
                    }
                }
            }
            context.conContext.fatal(Alert.HANDSHAKE_FAILURE,
                    "No suitable KEM key agreement "
                    + "parameters negotiated");
            return null;
        }
    }
}
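
How the two possession types map onto the TLS roles, as a brief sketch (illustrative; ng and random stand for the negotiated group and the context's SecureRandom):

    // Client (receiver): generates the key pair up front and publishes
    // the public key in the ClientHello key_share.
    var client = new KEMKeyExchange.KEMReceiverPossession(ng, random);
    byte[] clientShare = client.encode();

    // Server (sender): holds no key pair; SHKeyShareProducer later
    // encapsulates against the client's share and stores the derived
    // secret here via setKey().
    var server = new KEMKeyExchange.KEMSenderPossession(ng, random);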

@@ -27,8 +27,11 @@ package sun.security.ssl;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.AlgorithmConstraints;
import java.security.CryptoPrimitive;
import java.security.GeneralSecurityException;
import java.security.spec.AlgorithmParameterSpec;
import java.security.spec.NamedParameterSpec;
import java.text.MessageFormat;
import java.util.*;
import javax.net.ssl.SSLProtocolException;
@@ -297,7 +300,9 @@ final class KeyShareExtension {
                // update the context
                chc.handshakePossessions.add(pos);
                // May need more possession types in the future.
-               if (pos instanceof NamedGroupPossession) {
+               if (pos instanceof NamedGroupPossession ||
+                       pos instanceof
+                               KEMKeyExchange.KEMReceiverPossession) {
                    return pos.encode();
                }
            }
@@ -358,24 +363,16 @@
                try {
                    SSLCredentials kaCred =
                            ng.decodeCredentials(entry.keyExchange);
-                   if (shc.algorithmConstraints != null &&
-                           kaCred instanceof
-                               NamedGroupCredentials namedGroupCredentials) {
-                       if (!shc.algorithmConstraints.permits(
-                               EnumSet.of(CryptoPrimitive.KEY_AGREEMENT),
-                               namedGroupCredentials.getPublicKey())) {
+
+                   if (!isCredentialPermitted(shc.algorithmConstraints,
+                           kaCred)) {
                        if (SSLLogger.isOn() &&
                                SSLLogger.isOn("ssl,handshake")) {
                            SSLLogger.warning(
                                "key share entry of " + ng + " does not " +
-                               " comply with algorithm constraints");
+                               "comply with algorithm constraints");
                        }
-
-                       kaCred = null;
-                       }
-                   }
-
-                   if (kaCred != null) {
+                   } else {
                        credentials.add(kaCred);
                    }
                } catch (GeneralSecurityException ex) {
@@ -513,7 +510,8 @@
        @Override
        public byte[] produce(ConnectionContext context,
                HandshakeMessage message) throws IOException {
-           // The producing happens in client side only.
+           // The producing happens in server side only.

            ServerHandshakeContext shc = (ServerHandshakeContext)context;

            // In response to key_share request only
@@ -571,7 +569,9 @@

            SSLPossession[] poses = ke.createPossessions(shc);
            for (SSLPossession pos : poses) {
-               if (!(pos instanceof NamedGroupPossession)) {
+               if (!(pos instanceof NamedGroupPossession ||
+                       pos instanceof
+                               KEMKeyExchange.KEMSenderPossession)) {
                    // May need more possession types in the future.
                    continue;
                }
@@ -579,7 +579,34 @@
                // update the context
                shc.handshakeKeyExchange = ke;
                shc.handshakePossessions.add(pos);

                // For KEM, perform encapsulation using the client's public
                // key (KEMCredentials). The resulting encapsulated message
                // becomes the key_share value sent to the client. The
                // shared secret derived from encapsulation is stored in
                // the KEMSenderPossession for later use in the TLS key
                // schedule.

                // SSLKeyExchange.createPossessions() returns at most one
                // key-agreement possession or one KEMSenderPossession
                // per handshake.
                if (pos instanceof KEMKeyExchange.KEMSenderPossession xp) {
                    if (cd instanceof KEMKeyExchange.KEMCredentials kcred
                            && ng.equals(kcred.namedGroup)) {
                        String name = ((NamedParameterSpec)
                                ng.keAlgParamSpec).getName();
                        KAKeyDerivation handshakeKD = new KAKeyDerivation(
                                name, ng, shc, null, null,
                                kcred.getKeyShare());
                        var encaped = handshakeKD.encapsulate(
                                "TlsHandshakeSecret", xp.getRandom());
                        xp.setKey(encaped.key());
                        keyShare = new KeyShareEntry(ng.id,
                                encaped.encapsulation());
                    }
                } else {
                    keyShare = new KeyShareEntry(ng.id, pos.encode());
                }
                break;
            }
@@ -663,19 +690,13 @@
                try {
                    SSLCredentials kaCred =
                            ng.decodeCredentials(keyShare.keyExchange);
-                   if (chc.algorithmConstraints != null &&
-                           kaCred instanceof
-                               NamedGroupCredentials namedGroupCredentials) {
-                       if (!chc.algorithmConstraints.permits(
-                               EnumSet.of(CryptoPrimitive.KEY_AGREEMENT),
-                               namedGroupCredentials.getPublicKey())) {
+
+                   if (!isCredentialPermitted(chc.algorithmConstraints,
+                           kaCred)) {
                        chc.conContext.fatal(Alert.INSUFFICIENT_SECURITY,
                                "key share entry of " + ng + " does not " +
-                               " comply with algorithm constraints");
-                       }
-                   }
-
-                   if (kaCred != null) {
+                               "comply with algorithm constraints");
+                   } else {
                        credentials = kaCred;
                    }
                } catch (GeneralSecurityException ex) {
@@ -696,6 +717,34 @@
            }
        }

        private static boolean isCredentialPermitted(
                AlgorithmConstraints constraints,
                SSLCredentials cred) {

            if (constraints == null) return true;
            if (cred == null) return false;

            if (cred instanceof NamedGroupCredentials namedGroupCred) {
                if (namedGroupCred instanceof KEMKeyExchange.KEMCredentials
                        kemCred) {
                    AlgorithmParameterSpec paramSpec = kemCred.getNamedGroup().
                            keAlgParamSpec;
                    String algName = (paramSpec instanceof NamedParameterSpec nps)
                            ? nps.getName() : null;
                    return algName != null && constraints.permits(
                            EnumSet.of(CryptoPrimitive.KEY_AGREEMENT),
                            algName,
                            null);
                } else {
                    return constraints.permits(
                            EnumSet.of(CryptoPrimitive.KEY_AGREEMENT),
                            namedGroupCred.getPublicKey());
                }
            }

            return true;
        }
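
        // For a KEM credential there is no local PublicKey to evaluate,
        // so the check effectively reduces to (illustrative):
        //   constraints.permits(EnumSet.of(CryptoPrimitive.KEY_AGREEMENT),
        //           "X25519MLKEM768" /* the group's name */, null);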

        /**
         * The absence processing if the extension is not present in
         * the ServerHello handshake message.
         */

@@ -214,6 +214,39 @@ enum NamedGroup {
            ProtocolVersion.PROTOCOLS_TO_13,
            PredefinedDHParameterSpecs.ffdheParams.get(8192)),

    ML_KEM_512(0x0200, "MLKEM512",
            NamedGroupSpec.NAMED_GROUP_KEM,
            ProtocolVersion.PROTOCOLS_OF_13,
            null),

    ML_KEM_768(0x0201, "MLKEM768",
            NamedGroupSpec.NAMED_GROUP_KEM,
            ProtocolVersion.PROTOCOLS_OF_13,
            null),

    ML_KEM_1024(0x0202, "MLKEM1024",
            NamedGroupSpec.NAMED_GROUP_KEM,
            ProtocolVersion.PROTOCOLS_OF_13,
            null),

    X25519MLKEM768(0x11ec, "X25519MLKEM768",
            NamedGroupSpec.NAMED_GROUP_KEM,
            ProtocolVersion.PROTOCOLS_OF_13,
            Hybrid.X25519_MLKEM768,
            HybridProvider.PROVIDER),

    SECP256R1MLKEM768(0x11eb, "SecP256r1MLKEM768",
            NamedGroupSpec.NAMED_GROUP_KEM,
            ProtocolVersion.PROTOCOLS_OF_13,
            Hybrid.SECP256R1_MLKEM768,
            HybridProvider.PROVIDER),

    SECP384R1MLKEM1024(0x11ed, "SecP384r1MLKEM1024",
            NamedGroupSpec.NAMED_GROUP_KEM,
            ProtocolVersion.PROTOCOLS_OF_13,
            Hybrid.SECP384R1_MLKEM1024,
            HybridProvider.PROVIDER),

    // Elliptic Curves (RFC 4492)
    //
    // arbitrary prime and characteristic-2 curves
@@ -234,22 +267,33 @@ enum NamedGroup {
    final AlgorithmParameterSpec keAlgParamSpec;
    final AlgorithmParameters keAlgParams;
    final boolean isAvailable;
    final Provider defaultProvider;

    // performance optimization
    private static final Set<CryptoPrimitive> KEY_AGREEMENT_PRIMITIVE_SET =
        Collections.unmodifiableSet(EnumSet.of(CryptoPrimitive.KEY_AGREEMENT));

    // Constructor used for all NamedGroup types
    NamedGroup(int id, String name,
            NamedGroupSpec namedGroupSpec,
            ProtocolVersion[] supportedProtocols,
            AlgorithmParameterSpec keAlgParamSpec) {
        this(id, name, namedGroupSpec, supportedProtocols, keAlgParamSpec,
                null);
    }

    // Constructor used for all NamedGroup types
    NamedGroup(int id, String name,
            NamedGroupSpec namedGroupSpec,
            ProtocolVersion[] supportedProtocols,
            AlgorithmParameterSpec keAlgParamSpec,
            Provider defaultProvider) {
        this.id = id;
        this.name = name;
        this.spec = namedGroupSpec;
        this.algorithm = namedGroupSpec.algorithm;
        this.supportedProtocols = supportedProtocols;
        this.keAlgParamSpec = keAlgParamSpec;
        this.defaultProvider = defaultProvider;

        // Check if it is a supported named group.
        AlgorithmParameters algParams = null;
@@ -266,16 +310,28 @@
        // Check the specific algorithm parameters.
        if (mediator) {
            try {
-               algParams =
-                   AlgorithmParameters.getInstance(namedGroupSpec.algorithm);
+               // Skip AlgorithmParameters for KEMs (not supported);
+               // check a KEM's availability via KeyFactory.
+               if (namedGroupSpec == NamedGroupSpec.NAMED_GROUP_KEM) {
+                   if (defaultProvider == null) {
+                       KeyFactory.getInstance(name);
+                   } else {
+                       KeyFactory.getInstance(name, defaultProvider);
+                   }
+               } else {
+                   // ECDHE or others: use AlgorithmParameters as before
+                   algParams = AlgorithmParameters.getInstance(
+                           namedGroupSpec.algorithm);
                    algParams.init(keAlgParamSpec);
+               }
            } catch (InvalidParameterSpecException
                    | NoSuchAlgorithmException exp) {
                if (namedGroupSpec != NamedGroupSpec.NAMED_GROUP_XDH) {
                    mediator = false;
                    if (SSLLogger.isOn() && SSLLogger.isOn("ssl,handshake")) {
                        SSLLogger.warning(
-                           "No AlgorithmParameters for " + name, exp);
+                           "No AlgorithmParameters or KeyFactory for " + name,
+                           exp);
                    }
                } else {
                    // Please remove the following code if the XDH/X25519/X448
@@ -307,6 +363,10 @@
        this.keAlgParams = mediator ? algParams : null;
    }

    Provider getProvider() {
        return defaultProvider;
    }

    //
    // The next set of methods search & retrieve NamedGroups.
    //
@@ -545,6 +605,10 @@
        return spec.decodeCredentials(this, encoded);
    }

    SSLPossession createPossession(boolean isClient, SecureRandom random) {
        return spec.createPossession(this, isClient, random);
    }

    SSLPossession createPossession(SecureRandom random) {
        return spec.createPossession(this, random);
    }
@@ -566,6 +630,11 @@

        SSLKeyDerivation createKeyDerivation(
                HandshakeContext hc) throws IOException;

        default SSLPossession createPossession(NamedGroup ng, boolean isClient,
                SecureRandom random) {
            return createPossession(ng, random);
        }
    }

    enum NamedGroupSpec implements NamedGroupScheme {
@@ -578,6 +647,10 @@
        // Finite Field Groups (XDH)
        NAMED_GROUP_XDH("XDH", XDHScheme.instance),

        // Post-Quantum Cryptography (PQC) KEM groups.
        // Currently used for hybrid named groups.
        NAMED_GROUP_KEM("KEM", KEMScheme.instance),

        // arbitrary prime and curves (ECDHE)
        NAMED_GROUP_ARBITRARY("EC", null),

@@ -634,6 +707,15 @@
            return null;
        }

        public SSLPossession createPossession(
                NamedGroup ng, boolean isClient, SecureRandom random) {
            if (scheme != null) {
                return scheme.createPossession(ng, isClient, random);
            }

            return null;
        }

        @Override
        public SSLPossession createPossession(
                NamedGroup ng, SecureRandom random) {
@@ -739,6 +821,42 @@
            }
        }

        private static class KEMScheme implements NamedGroupScheme {
            private static final KEMScheme instance = new KEMScheme();

            @Override
            public byte[] encodePossessionPublicKey(NamedGroupPossession poss) {
                return poss.encode();
            }

            @Override
            public SSLCredentials decodeCredentials(NamedGroup ng,
                    byte[] encoded) throws IOException, GeneralSecurityException {
                return KEMKeyExchange.KEMCredentials.valueOf(ng, encoded);
            }

            @Override
            public SSLPossession createPossession(NamedGroup ng,
                    SecureRandom random) {
                // Must call createPossession with isClient
                throw new UnsupportedOperationException();
            }

            @Override
            public SSLPossession createPossession(
                    NamedGroup ng, boolean isClient, SecureRandom random) {
                return isClient
                        ? new KEMKeyExchange.KEMReceiverPossession(ng, random)
                        : new KEMKeyExchange.KEMSenderPossession(ng, random);
            }

            @Override
            public SSLKeyDerivation createKeyDerivation(
                    HandshakeContext hc) throws IOException {
                return KEMKeyExchange.kemKAGenerator.createKeyDerivation(hc);
            }
        }

    static final class SupportedGroups {
        // the supported named groups, non-null immutable list
        static final String[] namedGroups;
@@ -784,6 +902,9 @@
            } else { // default groups
                NamedGroup[] groups = new NamedGroup[] {

                        // Hybrid key agreement
                        X25519MLKEM768,

                        // Primary XDH (RFC 7748) curves
                        X25519,

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -570,7 +570,9 @@ final class SSLKeyExchange implements SSLKeyAgreementGenerator,

    @Override
    public SSLPossession createPossession(HandshakeContext hc) {
-       return namedGroup.createPossession(hc.sslContext.getSecureRandom());
+       return namedGroup.createPossession(
+               hc instanceof ClientHandshakeContext,
+               hc.sslContext.getSecureRandom());
    }

    @Override
@@ -565,6 +565,34 @@ final class ServerHello {
                clientHello);
            shc.serverHelloRandom = shm.serverRandom;

            // For key derivation, we will either use the traditional Key
            // Agreement (KA) model or the Key Encapsulation Mechanism (KEM)
            // model, depending on which key exchange group is used.
            //
            // For KA flows, the server first receives the client's share,
            // then generates its key share, and finally comes here.
            // This changes for KEM: the server must derive the secret and
            // generate the key encapsulation message at the same time,
            // during encapsulation in SHKeyShareProducer.
            //
            // Traditional Key Agreement (KA):
            // - Both peers generate a key share and exchange it.
            // - Each peer computes a shared secret sometime after
            //   receiving the other's key share.
            //
            // Key Encapsulation Mechanism (KEM):
            // The client publishes a public key via a KeyShareExtension,
            // which the server uses to:
            //
            // - generate the shared secret
            // - encapsulate the message which is sent to the client in
            //   another KeyShareExtension
            //
            // The derived shared secret must be stored in a
            // KEMSenderPossession so it can be retrieved for handshake
            // traffic secret derivation later.
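            //
            // A compact sketch of the KEM message flow (illustrative):
            //   ClientHello : key_share = client public key pk
            //   Server      : (secret, ct) = Encapsulate(pk)
            //   ServerHello : key_share = ct
            //   Client      : secret = Decapsulate(sk, ct)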

            // Produce extensions for ServerHello handshake message.
            SSLExtension[] serverHelloExtensions =
                shc.sslConfig.getEnabledExtensions(
@@ -590,9 +618,26 @@
                    "Not negotiated key shares");
            }

            SecretKey handshakeSecret = null;

            // For KEM, the shared secret has already been generated and
            // stored in the server's possession (KEMSenderPossession)
            // during encapsulation in SHKeyShareProducer.
            //
            // Only one key share is selected by the server, so at most one
            // possession will contain the pre-derived shared secret.
            for (var pos : shc.handshakePossessions) {
                if (pos instanceof KEMKeyExchange.KEMSenderPossession xp) {
                    handshakeSecret = xp.getKey();
                    break;
                }
            }

            if (handshakeSecret == null) {
                SSLKeyDerivation handshakeKD = ke.createKeyDerivation(shc);
-               SecretKey handshakeSecret = handshakeKD.deriveKey(
+               handshakeSecret = handshakeKD.deriveKey(
                        "TlsHandshakeSecret");
            }

            SSLTrafficKeyDerivation kdg =
                SSLTrafficKeyDerivation.valueOf(shc.negotiatedProtocol);

@@ -104,6 +104,10 @@ public class X509Key implements PublicKey, DerEncoder {
        return (BitArray)bitStringKey.clone();
    }

    public byte[] getKeyAsBytes() {
        return bitStringKey.toByteArray();
    }

    /**
     * Construct X.509 subject public key from a DER value. If
     * the runtime environment is configured with a specific class for

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -355,20 +355,9 @@ public final class CPrinterJob extends RasterPrinterJob {
            validateDestination(destinationAttr);
        }

-       /* Get the range of pages we are to print. If the
-        * last page to print is unknown, then we print to
-        * the end of the document. Note that firstPage
-        * and lastPage are 0 based page indices.
-        */
+       // Note that firstPage is 0 based page index.
        int firstPage = getFirstPage();
-       int lastPage = getLastPage();
-       if (lastPage == Pageable.UNKNOWN_NUMBER_OF_PAGES) {
-           int totalPages = mDocument.getNumberOfPages();
-           if (totalPages != Pageable.UNKNOWN_NUMBER_OF_PAGES) {
-               lastPage = mDocument.getNumberOfPages() - 1;
-           }
-       }
+       int totalPages = mDocument.getNumberOfPages();

        try {
            synchronized (this) {
@@ -393,7 +382,7 @@
            try {
                // Fire off the print rendering loop on the AppKit thread, and don't have
                // it wait and block this thread.
-               if (printLoop(false, firstPage, lastPage)) {
+               if (printLoop(false, firstPage, totalPages)) {
                    // Start a secondary loop on EDT until printing operation is finished or cancelled
                    printingLoop.enter();
                }
@@ -407,7 +396,7 @@
            onEventThread = false;

            try {
-               printLoop(true, firstPage, lastPage);
+               printLoop(true, firstPage, totalPages);
            } catch (Exception e) {
                e.printStackTrace();
            }
@@ -417,7 +406,6 @@
            }
            if (++loopi < prMembers.length) {
                firstPage = prMembers[loopi][0]-1;
-               lastPage = prMembers[loopi][1] -1;
            }
        } while (loopi < prMembers.length);
    } finally {
@@ -693,7 +681,7 @@
        }
    }

-   private native boolean printLoop(boolean waitUntilDone, int firstPage, int lastPage) throws PrinterException;
+   private native boolean printLoop(boolean waitUntilDone, int firstPage, int totalPages) throws PrinterException;

    private PageFormat getPageFormat(int pageIndex) {
        // This is called from the native side.

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -656,7 +656,7 @@ JNI_COCOA_EXIT(env);
 * Signature: ()V
 */
JNIEXPORT jboolean JNICALL Java_sun_lwawt_macosx_CPrinterJob_printLoop
-(JNIEnv *env, jobject jthis, jboolean blocks, jint firstPage, jint lastPage)
+(JNIEnv *env, jobject jthis, jboolean blocks, jint firstPage, jint totalPages)
{
    AWT_ASSERT_NOT_APPKIT_THREAD;

@@ -672,14 +672,14 @@ JNIEXPORT jboolean JNICALL Java_sun_lwawt_macosx_CPrinterJob_printLoop
JNI_COCOA_ENTER(env);
    // Get the first page's PageFormat for setting things up (This introduces
    // and is a facet of the same problem in Radar 2818593/2708932).
-   jobject page = (*env)->CallObjectMethod(env, jthis, jm_getPageFormat, 0); // AWT_THREADING Safe (!appKit)
+   jobject page = (*env)->CallObjectMethod(env, jthis, jm_getPageFormat, firstPage); // AWT_THREADING Safe (!appKit)
    CHECK_EXCEPTION();
    if (page != NULL) {
        jobject pageFormatArea = (*env)->CallObjectMethod(env, jthis, jm_getPageFormatArea, page); // AWT_THREADING Safe (!appKit)
        CHECK_EXCEPTION();

        PrinterView* printerView = [[PrinterView alloc] initWithFrame:JavaToNSRect(env, pageFormatArea) withEnv:env withPrinterJob:jthis];
-       [printerView setFirstPage:firstPage lastPage:lastPage];
+       [printerView setTotalPages:totalPages];

        GET_NSPRINTINFO_METHOD_RETURN(NO)
        NSPrintInfo* printInfo = (NSPrintInfo*)jlong_to_ptr((*env)->CallLongMethod(env, jthis, sjm_getNSPrintInfo)); // AWT_THREADING Safe (known object)

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,12 @@
    jobject fCurPainter;
    jobject fCurPeekGraphics;

-   jint fFirstPage, fLastPage;
+   jint fTotalPages;
}

- (id)initWithFrame:(NSRect)aRect withEnv:(JNIEnv*)env withPrinterJob:(jobject)printerJob;

-- (void)setFirstPage:(jint)firstPage lastPage:(jint)lastPage;
+- (void)setTotalPages:(jint)totalPages;

- (void)releaseReferences:(JNIEnv*)env;

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -72,9 +72,8 @@ static jclass sjc_PAbortEx = NULL;
    }
}

-- (void)setFirstPage:(jint)firstPage lastPage:(jint)lastPage {
-   fFirstPage = firstPage;
-   fLastPage = lastPage;
+- (void)setTotalPages:(jint)totalPages {
+   fTotalPages = totalPages;
}

- (void)drawRect:(NSRect)aRect
@@ -156,15 +155,15 @@
        return NO;
    }

-   aRange->location = fFirstPage + 1;
+   aRange->location = 1;

-   if (fLastPage == java_awt_print_Pageable_UNKNOWN_NUMBER_OF_PAGES)
+   if (fTotalPages == java_awt_print_Pageable_UNKNOWN_NUMBER_OF_PAGES)
    {
        aRange->length = NSIntegerMax;
    }
    else
    {
-       aRange->length = (fLastPage + 1) - fFirstPage;
+       aRange->length = fTotalPages;
    }

    return YES;

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -339,7 +339,7 @@ Java_sun_java2d_xr_XRBackendNative_createPixmap(JNIEnv *env, jobject this,
JNIEXPORT jint JNICALL
Java_sun_java2d_xr_XRBackendNative_createPictureNative
    (JNIEnv *env, jclass cls, jint drawable, jlong formatPtr) {
-   XRenderPictureAttributes pict_attr;
+   XRenderPictureAttributes pict_attr = {0};
    return XRenderCreatePicture(awt_display, (Drawable) drawable,
                (XRenderPictFormat *) jlong_to_ptr(formatPtr),
                0, &pict_attr);

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -905,11 +905,47 @@ public final class WindowsIconFactory implements Serializable
        XPStyle xp = XPStyle.getXP();
        if (xp != null) {
            Skin skin = xp.getSkin(c, part);
            if (WindowsGraphicsUtils.isLeftToRight(c)) {
                if (icon == null || icon.getIconHeight() <= 16) {
                    skin.paintSkin(g, x + OFFSET, y + OFFSET, state);
                } else {
                    skin.paintSkin(g, x + OFFSET, y + icon.getIconHeight() / 2, state);
                }
            } else {
                if (icon == null) {
                    skin.paintSkin(g, x + 4 * OFFSET, y + OFFSET, state);
                } else {
                    int ycoord = (icon.getIconHeight() <= 16)
                            ? y + OFFSET
                            : (y + icon.getIconHeight() / 2);
                    if (icon.getIconWidth() <= 8) {
                        skin.paintSkin(g, x + OFFSET, ycoord, state);
                    } else if (icon.getIconWidth() <= 16) {
                        if (menuItem.getText().isEmpty()) {
                            skin.paintSkin(g,
                                    (menuItem.getAccelerator() != null)
                                            ? (x + 2 * OFFSET) : (x + 3 * OFFSET),
                                    ycoord, state);
                        } else {
                            skin.paintSkin(g,
                                    (type == JRadioButtonMenuItem.class)
                                            ? (x + 4 * OFFSET) : (x + 3 * OFFSET),
                                    ycoord, state);
                        }
                    } else {
                        if (menuItem.getText().isEmpty()
                                || menuItem.getAccelerator() != null) {
                            skin.paintSkin(g,
                                    (type == JRadioButtonMenuItem.class)
                                            ? (x + 3 * OFFSET) : (x + 4 * OFFSET),
                                    ycoord, state);
                        } else {
                            skin.paintSkin(g, x + 7 * OFFSET,
                                    ycoord, state);
                        }
                    }
                }
            }
        }
    }