Merge remote-tracking branch 'origin/master' into strlen

Liam Miller-Cushon 2026-02-28 19:31:44 +01:00
commit 357db1d135
271 changed files with 9150 additions and 3129 deletions

View File

@ -1,5 +1,5 @@
#
# Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -66,7 +66,8 @@ CALLED_SPEC_TARGETS := $(filter-out $(ALL_GLOBAL_TARGETS), $(CALLED_TARGETS))
ifeq ($(CALLED_SPEC_TARGETS), )
SKIP_SPEC := true
endif
ifeq ($(findstring p, $(MAKEFLAGS))$(findstring q, $(MAKEFLAGS)), pq)
MFLAGS_SINGLE := $(filter-out --%, $(MFLAGS))
ifeq ($(findstring p, $(MFLAGS_SINGLE))$(findstring q, $(MFLAGS_SINGLE)), pq)
SKIP_SPEC := true
endif

View File

@ -267,8 +267,8 @@ AC_DEFUN_ONCE([LIB_SETUP_ZLIB],
LIBZ_LIBS=""
if test "x$USE_EXTERNAL_LIBZ" = "xfalse"; then
LIBZ_CFLAGS="$LIBZ_CFLAGS -I$TOPDIR/src/java.base/share/native/libzip/zlib"
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
LIBZ_CFLAGS="$LIBZ_CFLAGS -DHAVE_UNISTD_H"
if test "x$OPENJDK_TARGET_OS" = xmacosx -o "x$OPENJDK_TARGET_OS" = xaix -o "x$OPENJDK_TARGET_OS" = xlinux; then
LIBZ_CFLAGS="$LIBZ_CFLAGS -DHAVE_UNISTD_H=1 -DHAVE_STDARG_H=1"
fi
else
LIBZ_LIBS="-lz"

View File

@ -3403,11 +3403,13 @@ encode %{
} else if (rtype == relocInfo::metadata_type) {
__ mov_metadata(dst_reg, (Metadata*)con);
} else {
assert(rtype == relocInfo::none, "unexpected reloc type");
assert(rtype == relocInfo::none || rtype == relocInfo::external_word_type, "unexpected reloc type");
// load fake address constants using a normal move
if (! __ is_valid_AArch64_address(con) ||
con < (address)(uintptr_t)os::vm_page_size()) {
__ mov(dst_reg, con);
} else {
// no reloc so just use adrp and add
uint64_t offset;
__ adrp(dst_reg, con, offset);
__ add(dst_reg, dst_reg, offset);
@ -4535,6 +4537,18 @@ operand immP_1()
interface(CONST_INTER);
%}
// AOT Runtime Constants Address
operand immAOTRuntimeConstantsAddress()
%{
// Check if the address is in the range of AOT Runtime Constants
predicate(AOTRuntimeConstants::contains((address)(n->get_ptr())));
match(ConP);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Float and Double operands
// Double Immediate
operand immD()
@ -6898,6 +6912,20 @@ instruct loadConP1(iRegPNoSp dst, immP_1 con)
ins_pipe(ialu_imm);
%}
instruct loadAOTRCAddress(iRegPNoSp dst, immAOTRuntimeConstantsAddress con)
%{
match(Set dst con);
ins_cost(INSN_COST);
format %{ "adr $dst, $con\t# AOT Runtime Constants Address" %}
ins_encode %{
__ load_aotrc_address($dst$$Register, (address)$con$$constant);
%}
ins_pipe(ialu_imm);
%}
// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)

View File

@ -33,6 +33,7 @@
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/aotCodeCache.hpp"
#include "code/compiledIC.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
@ -532,6 +533,15 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
case T_LONG: {
assert(patch_code == lir_patch_none, "no patching handled here");
#if INCLUDE_CDS
if (AOTCodeCache::is_on_for_dump()) {
address b = c->as_pointer();
if (AOTRuntimeConstants::contains(b)) {
__ load_aotrc_address(dest->as_register_lo(), b);
break;
}
}
#endif
__ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
break;
}
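This guard is the pattern the rest of the commit threads through both back ends: when dumping AOT code, a pointer constant that falls inside the AOTRuntimeConstants singleton cannot be baked in as an absolute immediate, because the constants area sits at a different address in the dump-time and load-time JVMs, so it is materialized through a relocatable external address instead. A minimal sketch of the decision, using only calls introduced by this patch:

// Sketch only; both predicates are defined elsewhere in this commit.
static bool needs_relocatable_materialization(address con) {
  // Dump-time code that references the constants area must stay relocatable.
  return AOTCodeCache::is_on_for_dump() && AOTRuntimeConstants::contains(con);
}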

View File

@ -23,6 +23,7 @@
*/
#include "asm/macroAssembler.inline.hpp"
#include "code/aotCodeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
@ -243,9 +244,25 @@ static void generate_post_barrier(MacroAssembler* masm,
assert_different_registers(store_addr, new_val, thread, tmp1, tmp2, noreg, rscratch1);
// Does store cross heap regions?
__ eor(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
__ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
__ cbz(tmp1, done);
#if INCLUDE_CDS
// AOT code needs to load the barrier grain shift from the AOT
// runtime constants area in the code cache; otherwise we can compile
// it as an immediate operand
if (AOTCodeCache::is_on_for_dump()) {
address grain_shift_address = (address)AOTRuntimeConstants::grain_shift_address();
__ eor(tmp1, store_addr, new_val);
__ lea(tmp2, ExternalAddress(grain_shift_address));
__ ldrb(tmp2, tmp2);
__ lsrv(tmp1, tmp1, tmp2);
__ cbz(tmp1, done);
} else
#endif
{
__ eor(tmp1, store_addr, new_val); // tmp1 := store address ^ new value
__ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
__ cbz(tmp1, done);
}
// Crosses regions, storing null?
if (new_val_may_be_null) {
__ cbz(new_val, done);
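For reference, a standalone C++ analogue of the region-crossing test that both paths above compute; the JIT path bakes the grain shift in as an immediate while the AOT path loads it from the runtime constants area, but the predicate is the same:

#include <cstdint>

// True when the store address and the new value lie in different heap regions.
static bool crosses_region(uintptr_t store_addr, uintptr_t new_val, unsigned grain_shift) {
  return ((store_addr ^ new_val) >> grain_shift) != 0;
}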

View File

@ -5754,6 +5754,14 @@ void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_off
}
void MacroAssembler::load_byte_map_base(Register reg) {
#if INCLUDE_CDS
if (AOTCodeCache::is_on_for_dump()) {
address byte_map_base_adr = AOTRuntimeConstants::card_table_base_address();
lea(reg, ExternalAddress(byte_map_base_adr));
ldr(reg, Address(reg));
return;
}
#endif
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
// Strictly speaking the card table base isn't an address at all, and it might
@ -5761,6 +5769,20 @@ void MacroAssembler::load_byte_map_base(Register reg) {
mov(reg, (uint64_t)ctbs->card_table_base_const());
}
void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
if (AOTCodeCache::is_on_for_dump()) {
// all aotrc field addresses should be registered in the AOTCodeCache address table
lea(reg, ExternalAddress(a));
} else {
mov(reg, (uint64_t)a);
}
#else
ShouldNotReachHere();
#endif
}
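A hedged usage sketch, mirroring load_byte_map_base above: callers first materialize the address of a slot in the constants area, then dereference it to read the current runtime value:

// rscratch1 := &_aot_runtime_constants._card_table_base, then its contents
__ load_aotrc_address(rscratch1, AOTRuntimeConstants::card_table_base_address());
__ ldr(rscratch1, Address(rscratch1));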
void MacroAssembler::build_frame(int framesize) {
assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");

View File

@ -1476,6 +1476,9 @@ public:
// Load the base of the cardtable byte map into reg.
void load_byte_map_base(Register reg);
// Load a constant address in the AOT Runtime Constants area
void load_aotrc_address(Register reg, address a);
// Prolog generator routines to support switch between x86 code and
// generated ARM code

View File

@ -446,7 +446,9 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
}
} else if (UseBlockZeroing) {
warning("DC ZVA is not available on this CPU");
if (!FLAG_IS_DEFAULT(UseBlockZeroing)) {
warning("DC ZVA is not available on this CPU");
}
FLAG_SET_DEFAULT(UseBlockZeroing, false);
}

View File

@ -207,7 +207,7 @@ public:
return false;
}
static bool is_zva_enabled() { return 0 <= _zva_length; }
static bool is_zva_enabled() { return 0 < _zva_length; }
static int zva_length() {
assert(is_zva_enabled(), "ZVA not available");
return _zva_length;

View File

@ -32,6 +32,7 @@
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "code/aotCodeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
@ -535,6 +536,15 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
case T_LONG: {
assert(patch_code == lir_patch_none, "no patching handled here");
#if INCLUDE_CDS
if (AOTCodeCache::is_on_for_dump()) {
address b = c->as_pointer();
if (AOTRuntimeConstants::contains(b)) {
__ load_aotrc_address(dest->as_register_lo(), b);
break;
}
}
#endif
__ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
break;
}

View File

@ -23,6 +23,7 @@
*/
#include "asm/macroAssembler.inline.hpp"
#include "code/aotCodeCache.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
@ -268,6 +269,16 @@ void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm,
__ bind(done);
}
#if INCLUDE_CDS
// return a register that differs from reg1, reg2, reg3 and reg4
static Register pick_different_reg(Register reg1, Register reg2 = noreg, Register reg3 = noreg, Register reg4 = noreg) {
RegSet available = (RegSet::of(rscratch1, rscratch2, rax, rbx) + rdx -
RegSet::of(reg1, reg2, reg3, reg4));
return *(available.begin());
}
#endif // INCLUDE_CDS
static void generate_post_barrier(MacroAssembler* masm,
const Register store_addr,
const Register new_val,
@ -280,10 +291,32 @@ static void generate_post_barrier(MacroAssembler* masm,
Label L_done;
// Does store cross heap regions?
__ movptr(tmp1, store_addr); // tmp1 := store address
__ xorptr(tmp1, new_val); // tmp1 := store address ^ new value
__ shrptr(tmp1, G1HeapRegion::LogOfHRGrainBytes); // ((store address ^ new value) >> LogOfHRGrainBytes) == 0?
__ jccb(Assembler::equal, L_done);
#if INCLUDE_CDS
// AOT code needs to load the barrier grain shift from the AOT
// runtime constants area in the code cache; otherwise we can compile
// it as an immediate operand
if (AOTCodeCache::is_on_for_dump()) {
address grain_shift_addr = AOTRuntimeConstants::grain_shift_address();
Register save = pick_different_reg(rcx, tmp1, new_val, store_addr);
__ push(save);
__ movptr(save, store_addr);
__ xorptr(save, new_val);
__ push(rcx);
__ lea(rcx, ExternalAddress(grain_shift_addr));
__ movl(rcx, Address(rcx, 0));
__ shrptr(save);
__ pop(rcx);
__ pop(save);
__ jcc(Assembler::equal, L_done);
} else
#endif // INCLUDE_CDS
{
__ movptr(tmp1, store_addr); // tmp1 := store address
__ xorptr(tmp1, new_val); // tmp1 := store address ^ new value
__ shrptr(tmp1, G1HeapRegion::LogOfHRGrainBytes); // ((store address ^ new value) >> LogOfHRGrainBytes) == 0?
__ jccb(Assembler::equal, L_done);
}
// Crosses regions, storing null?
if (new_val_may_be_null) {

View File

@ -23,6 +23,7 @@
*/
#include "asm/macroAssembler.inline.hpp"
#include "code/aotCodeCache.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
@ -111,7 +112,15 @@ void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembl
__ shrptr(end, CardTable::card_shift());
__ subptr(end, addr); // end --> cards count
__ mov64(tmp, (intptr_t)ctbs->card_table_base_const());
#if INCLUDE_CDS
if (AOTCodeCache::is_on_for_dump()) {
__ lea(tmp, ExternalAddress(AOTRuntimeConstants::card_table_base_address()));
__ movq(tmp, Address(tmp, 0));
} else
#endif
{
__ mov64(tmp, (intptr_t)ctbs->card_table_base_const());
}
__ addptr(addr, tmp);
__ BIND(L_loop);
__ movb(Address(addr, count, Address::times_1), 0);
@ -121,7 +130,7 @@ __ BIND(L_loop);
__ BIND(L_done);
}
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst) {
void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register obj, Address dst, Register rscratch) {
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
CardTableBarrierSet* ctbs = CardTableBarrierSet::barrier_set();
@ -136,6 +145,13 @@ void CardTableBarrierSetAssembler::store_check(MacroAssembler* masm, Register ob
// never need to be relocated. On 64bit however the value may be too
// large for a 32bit displacement.
intptr_t byte_map_base = (intptr_t)ctbs->card_table_base_const();
#if INCLUDE_CDS
if (AOTCodeCache::is_on_for_dump()) {
__ lea(rscratch, ExternalAddress(AOTRuntimeConstants::card_table_base_address()));
__ movq(rscratch, Address(rscratch, 0));
card_addr = Address(rscratch, obj, Address::times_1, 0);
} else
#endif
if (__ is_simm32(byte_map_base)) {
card_addr = Address(noreg, obj, Address::times_1, byte_map_base);
} else {
@ -174,10 +190,10 @@ void CardTableBarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorS
if (needs_post_barrier) {
// flatten object address if needed
if (!precise || (dst.index() == noreg && dst.disp() == 0)) {
store_check(masm, dst.base(), dst);
store_check(masm, dst.base(), dst, tmp2);
} else {
__ lea(tmp1, dst);
store_check(masm, tmp1, dst);
store_check(masm, tmp1, dst, tmp2);
}
}
}
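Independent of how the byte map base is materialized, store_check computes the same thing; a plain C++ analogue (HotSpot's dirty card value is 0, as the movb above shows):

#include <cstdint>

// Dirty the card covering the stored-to object.
static void dirty_card(volatile uint8_t* card_table_base, uintptr_t obj, unsigned card_shift) {
  card_table_base[obj >> card_shift] = 0;
}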

View File

@ -33,7 +33,7 @@ protected:
virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
Register addr, Register count) {}
void store_check(MacroAssembler* masm, Register obj, Address dst);
void store_check(MacroAssembler* masm, Register obj, Address dst, Register rscratch);
virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register tmp);

View File

@ -10034,6 +10034,20 @@ void MacroAssembler::restore_legacy_gprs() {
addq(rsp, 16 * wordSize);
}
void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
if (AOTCodeCache::is_on_for_dump()) {
// all aotrc field addresses should be registered in the AOTCodeCache address table
lea(reg, ExternalAddress(a));
} else {
mov64(reg, (uint64_t)a);
}
#else
ShouldNotReachHere();
#endif
}
void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
if (VM_Version::supports_apx_f()) {
esetzucc(comparison, dst);

View File

@ -2070,6 +2070,7 @@ public:
void save_legacy_gprs();
void restore_legacy_gprs();
void load_aotrc_address(Register reg, address a);
void setcc(Assembler::Condition comparison, Register dst);
};

View File

@ -5187,6 +5187,18 @@ operand immL_65535()
interface(CONST_INTER);
%}
// AOT Runtime Constants Address
operand immAOTRuntimeConstantsAddress()
%{
// Check if the address is in the range of AOT Runtime Constants
predicate(AOTRuntimeConstants::contains((address)(n->get_ptr())));
match(ConP);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand kReg()
%{
constraint(ALLOC_IN_RC(vectmask_reg));
@ -7332,6 +7344,19 @@ instruct loadD(regD dst, memory mem)
ins_pipe(pipe_slow); // XXX
%}
instruct loadAOTRCAddress(rRegP dst, immAOTRuntimeConstantsAddress con)
%{
match(Set dst con);
format %{ "leaq $dst, $con\t# AOT Runtime Constants Address" %}
ins_encode %{
__ load_aotrc_address($dst$$Register, (address)$con$$constant);
%}
ins_pipe(ialu_reg_fat);
%}
// max = java.lang.Math.max(float a, float b)
instruct maxF_reg_avx10_2(regF dst, regF a, regF b) %{
predicate(VM_Version::supports_avx10_2());

View File

@ -27,22 +27,30 @@
#include "runtime/vm_version.hpp"
int VM_Version::get_current_sve_vector_length() {
assert(_features & CPU_SVE, "should not call this");
assert(VM_Version::supports_sve(), "should not call this");
ShouldNotReachHere();
return 0;
}
int VM_Version::set_and_get_current_sve_vector_length(int length) {
assert(_features & CPU_SVE, "should not call this");
assert(VM_Version::supports_sve(), "should not call this");
ShouldNotReachHere();
return 0;
}
void VM_Version::get_os_cpu_info() {
if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE)) _features |= CPU_CRC32;
if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)) _features |= CPU_AES | CPU_SHA1 | CPU_SHA2;
if (IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE)) _features |= CPU_ASIMD;
if (IsProcessorFeaturePresent(PF_ARM_V8_CRC32_INSTRUCTIONS_AVAILABLE)) {
set_feature(CPU_CRC32);
}
if (IsProcessorFeaturePresent(PF_ARM_V8_CRYPTO_INSTRUCTIONS_AVAILABLE)) {
set_feature(CPU_AES);
set_feature(CPU_SHA1);
set_feature(CPU_SHA2);
}
if (IsProcessorFeaturePresent(PF_ARM_VFP_32_REGISTERS_AVAILABLE)) {
set_feature(CPU_ASIMD);
}
// No check for CPU_PMULL, CPU_SVE, CPU_SVE2
__int64 dczid_el0 = _ReadStatusReg(0x5807 /* ARM64_DCZID_EL0 */);

View File

@ -213,6 +213,7 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._VM_file._name));
AD.addInclude(AD._CPP_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._CPP_file, "memory/allocation.inline.hpp");
AD.addInclude(AD._CPP_file, "code/aotCodeCache.hpp");
AD.addInclude(AD._CPP_file, "code/codeCache.hpp");
AD.addInclude(AD._CPP_file, "code/compiledIC.hpp");
AD.addInclude(AD._CPP_file, "code/nativeInst.hpp");
@ -257,6 +258,7 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_PEEPHOLE_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._CPP_PIPELINE_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._DFA_file, "adfiles", get_basename(AD._HPP_file._name));
AD.addInclude(AD._DFA_file, "code/aotCodeCache.hpp");
AD.addInclude(AD._DFA_file, "oops/compressedOops.hpp");
AD.addInclude(AD._DFA_file, "opto/cfgnode.hpp"); // Use PROB_MAX in predicate.
AD.addInclude(AD._DFA_file, "opto/intrinsicnode.hpp");

View File

@ -1029,7 +1029,7 @@ class methodHandle;
do_intrinsic(_VectorUnaryLibOp, jdk_internal_vm_vector_VectorSupport, vector_unary_lib_op_name, vector_unary_lib_op_sig, F_S) \
do_signature(vector_unary_lib_op_sig,"(J" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljava/lang/String;" \
"Ljdk/internal/vm/vector/VectorSupport$Vector;" \
@ -1040,7 +1040,7 @@ class methodHandle;
do_intrinsic(_VectorBinaryLibOp, jdk_internal_vm_vector_VectorSupport, vector_binary_lib_op_name, vector_binary_lib_op_sig, F_S) \
do_signature(vector_binary_lib_op_sig,"(J" \
"Ljava/lang/Class;" \
"Ljava/lang/Class;" \
"I" \
"I" \
"Ljava/lang/String;" \
"Ljdk/internal/vm/vector/VectorSupport$VectorPayload;" \

View File

@ -29,9 +29,11 @@
#include "cds/cds_globals.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaAssertions.hpp"
#include "code/aotCodeCache.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcConfig.hpp"
#include "logging/logStream.hpp"
#include "memory/memoryReserver.hpp"
@ -53,6 +55,7 @@
#endif
#if INCLUDE_G1GC
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/shenandoahRuntime.hpp"
@ -258,6 +261,9 @@ void AOTCodeCache::init2() {
return;
}
// initialize AOT runtime constants as appropriate for this runtime
AOTRuntimeConstants::initialize_from_runtime();
// initialize the table of external routines so we can save
// generated code blobs that reference them
AOTCodeAddressTable* table = opened_cache->_table;
@ -1447,6 +1453,12 @@ void AOTCodeAddressTable::init_extrs() {
#endif
#endif // ZERO
// addresses of fields in AOT runtime constants area
address* p = AOTRuntimeConstants::field_addresses_list();
while (*p != nullptr) {
SET_ADDRESS(_extrs, *p++);
}
_extrs_complete = true;
log_debug(aot, codecache, init)("External addresses recorded");
}
@ -1729,6 +1741,11 @@ int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeB
if (addr == (address)-1) { // Static call stub has jump to itself
return id;
}
// Check card_table_base address first since it can point to any address
BarrierSet* bs = BarrierSet::barrier_set();
bool is_const_card_table_base = !UseG1GC && !UseShenandoahGC && bs->is_a(BarrierSet::CardTableBarrierSet);
guarantee(!is_const_card_table_base || addr != ci_card_table_address_const(), "sanity");
// Search for C string
id = id_for_C_string(addr);
if (id >= 0) {
@ -1798,6 +1815,44 @@ int AOTCodeAddressTable::id_for_address(address addr, RelocIterator reloc, CodeB
return id;
}
AOTRuntimeConstants AOTRuntimeConstants::_aot_runtime_constants;
void AOTRuntimeConstants::initialize_from_runtime() {
BarrierSet* bs = BarrierSet::barrier_set();
address card_table_base = nullptr;
uint grain_shift = 0;
#if INCLUDE_G1GC
if (bs->is_a(BarrierSet::G1BarrierSet)) {
grain_shift = G1HeapRegion::LogOfHRGrainBytes;
} else
#endif
#if INCLUDE_SHENANDOAHGC
if (bs->is_a(BarrierSet::ShenandoahBarrierSet)) {
grain_shift = 0;
} else
#endif
if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
CardTable::CardValue* base = ci_card_table_address_const();
assert(base != nullptr, "unexpected byte_map_base");
card_table_base = base;
CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
grain_shift = ctbs->grain_shift();
}
_aot_runtime_constants._card_table_base = card_table_base;
_aot_runtime_constants._grain_shift = grain_shift;
}
address AOTRuntimeConstants::_field_addresses_list[] = {
((address)&_aot_runtime_constants._card_table_base),
((address)&_aot_runtime_constants._grain_shift),
nullptr
};
address AOTRuntimeConstants::card_table_base_address() {
assert(UseSerialGC || UseParallelGC, "Only these GCs have constant card table base");
return (address)&_aot_runtime_constants._card_table_base;
}
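The nullptr-terminated walk over _field_addresses_list (see init_extrs above) is the whole registration protocol: every field of the singleton becomes an entry in the external-address table, so dump-time relocations can name it. A standalone sketch of the same idiom, with SET_ADDRESS replaced by a simple collect and hypothetical slot names:

#include <vector>

static long slot_a, slot_b;                                      // stand-ins for the aotrc fields
static void* const field_list[] = { &slot_a, &slot_b, nullptr }; // sentinel-terminated, like _field_addresses_list

static std::vector<void*> collect_externals() {
  std::vector<void*> out;
  for (void* const* p = field_list; *p != nullptr; ++p) {
    out.push_back(*p); // each slot address is registered exactly once
  }
  return out;
}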
// This is called after initialize() but before init2()
// and _cache is not set yet.
void AOTCodeCache::print_on(outputStream* st) {

View File

@ -25,6 +25,7 @@
#ifndef SHARE_CODE_AOTCODECACHE_HPP
#define SHARE_CODE_AOTCODECACHE_HPP
#include "gc/shared/gc_globals.hpp"
#include "runtime/stubInfo.hpp"
/*
@ -422,4 +423,36 @@ public:
#endif // PRODUCT
};
// code cache internal runtime constants area used by AOT code
class AOTRuntimeConstants {
friend class AOTCodeCache;
private:
address _card_table_base;
uint _grain_shift;
static address _field_addresses_list[];
static AOTRuntimeConstants _aot_runtime_constants;
// private constructor for unique singleton
AOTRuntimeConstants() { }
// private for use by friend class AOTCodeCache
static void initialize_from_runtime();
public:
#if INCLUDE_CDS
static bool contains(address adr) {
address base = (address)&_aot_runtime_constants;
address hi = base + sizeof(AOTRuntimeConstants);
return (base <= adr && adr < hi);
}
static address card_table_base_address();
static address grain_shift_address() { return (address)&_aot_runtime_constants._grain_shift; }
static address* field_addresses_list() {
return _field_addresses_list;
}
#else
static bool contains(address adr) { return false; }
static address card_table_base_address() { return nullptr; }
static address grain_shift_address() { return nullptr; }
static address* field_addresses_list() { return nullptr; }
#endif
};
#endif // SHARE_CODE_AOTCODECACHE_HPP

View File

@ -25,6 +25,7 @@
#ifndef SHARE_GC_G1_G1BARRIERSET_HPP
#define SHARE_GC_G1_G1BARRIERSET_HPP
#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/shared/bufferNode.hpp"
#include "gc/shared/cardTable.hpp"
@ -116,6 +117,8 @@ class G1BarrierSet: public CardTableBarrierSet {
virtual void print_on(outputStream* st) const;
virtual uint grain_shift() { return G1HeapRegion::LogOfHRGrainBytes; }
// Callbacks for runtime accesses.
template <DecoratorSet decorators, typename BarrierSetT = G1BarrierSet>
class AccessBarrier: public CardTableBarrierSet::AccessBarrier<decorators, BarrierSetT> {

View File

@ -22,6 +22,7 @@
*
*/
#include "code/aotCodeCache.hpp"
#include "gc/shared/c1/cardTableBarrierSetC1.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
@ -123,6 +124,7 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Op
assert(addr->is_register(), "must be a register at this point");
#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
assert(!AOTCodeCache::is_on(), "this path is not implemented");
gen->CardTableBarrierSet_post_barrier_helper(addr, card_table_base);
#else
LIR_Opr tmp = gen->new_pointer_register();
@ -135,6 +137,17 @@ void CardTableBarrierSetC1::post_barrier(LIRAccess& access, LIR_Opr addr, LIR_Op
}
LIR_Address* card_addr;
#if INCLUDE_CDS
if (AOTCodeCache::is_on_for_dump()) {
// load the card table address from the AOT Runtime Constants area
LIR_Opr byte_map_base_adr = LIR_OprFact::intptrConst(AOTRuntimeConstants::card_table_base_address());
LIR_Opr byte_map_base_reg = gen->new_pointer_register();
__ move(byte_map_base_adr, byte_map_base_reg);
LIR_Address* byte_map_base_indirect = new LIR_Address(byte_map_base_reg, 0, T_LONG);
__ move(byte_map_base_indirect, byte_map_base_reg);
card_addr = new LIR_Address(tmp, byte_map_base_reg, T_BYTE);
} else
#endif
if (gen->can_inline_as_constant(card_table_base)) {
card_addr = new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE);
} else {

View File

@ -23,6 +23,7 @@
*/
#include "ci/ciUtilities.hpp"
#include "code/aotCodeCache.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
@ -114,13 +115,20 @@ Node* CardTableBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access
return result;
}
Node* CardTableBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
Node* CardTableBarrierSetC2::byte_map_base_node(IdealKit* kit) const {
// Get base of card map
#if INCLUDE_CDS
if (AOTCodeCache::is_on_for_dump()) {
// load the card table address from the AOT Runtime Constants area
Node* byte_map_base_adr = kit->makecon(TypeRawPtr::make(AOTRuntimeConstants::card_table_base_address()));
return kit->load_aot_const(byte_map_base_adr, TypeRawPtr::NOTNULL);
}
#endif
CardTable::CardValue* card_table_base = ci_card_table_address_const();
if (card_table_base != nullptr) {
return kit->makecon(TypeRawPtr::make((address)card_table_base));
} else {
return kit->null();
return kit->makecon(Type::get_zero_type(T_ADDRESS));
}
}
@ -168,7 +176,7 @@ void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift()));
// Combine card table base and card offset
Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset);
Node* card_adr = __ AddP(__ top(), byte_map_base_node(&ideal), card_offset);
// Get the alias_index for raw card-mark memory
int adr_type = Compile::AliasIdxRaw;

View File

@ -43,7 +43,7 @@ protected:
Node* new_val, const Type* value_type) const;
virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
Node* byte_map_base_node(GraphKit* kit) const;
Node* byte_map_base_node(IdealKit* kit) const;
public:
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const;

View File

@ -103,6 +103,10 @@ public:
virtual void print_on(outputStream* st) const;
// The AOT code cache manager needs to know the region grain size
// shift for some barrier sets.
virtual uint grain_shift() { return 0; }
template <DecoratorSet decorators, typename BarrierSetT = CardTableBarrierSet>
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
typedef BarrierSet::AccessBarrier<decorators, BarrierSetT> Raw;

View File

@ -41,9 +41,9 @@ bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
return true;
}
ShenandoahReentrantLock* lock = ShenandoahNMethod::lock_for_nmethod(nm);
ShenandoahNMethodLock* lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != nullptr, "Must be");
ShenandoahReentrantLocker locker(lock);
ShenandoahNMethodLocker locker(lock);
if (!is_armed(nm)) {
// Some other thread managed to complete while we were

View File

@ -136,13 +136,13 @@ public:
assert(!nm_data->is_unregistered(), "Should not see unregistered entry");
if (nm->is_unloading()) {
ShenandoahReentrantLocker locker(nm_data->lock());
ShenandoahNMethodLocker locker(nm_data->lock());
nm->unlink();
return;
}
{
ShenandoahReentrantLocker locker(nm_data->lock());
ShenandoahNMethodLocker locker(nm_data->lock());
// Heal oops
if (_bs->is_armed(nm)) {
@ -154,7 +154,7 @@ public:
}
// Clear compiled ICs and exception caches
ShenandoahReentrantLocker locker(nm_data->ic_lock());
ShenandoahNMethodLocker locker(nm_data->ic_lock());
nm->unload_nmethod_caches(_unloading_occurred);
}
};

View File

@ -1023,7 +1023,7 @@ public:
void do_nmethod(nmethod* n) {
ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
ShenandoahReentrantLocker locker(data->lock());
ShenandoahNMethodLocker locker(data->lock());
// Setup EvacOOM scope below reentrant lock to avoid deadlock with
// nmethod_entry_barrier
ShenandoahEvacOOMScope oom;

View File

@ -1194,6 +1194,18 @@ void ShenandoahRegionPartitions::assert_bounds() {
assert(_humongous_waste[int(ShenandoahFreeSetPartitionId::Mutator)] == young_humongous_waste,
"Mutator humongous waste must match");
}
inline void ShenandoahRegionPartitions::assert_bounds_sanity() {
for (uint8_t i = 0; i < UIntNumPartitions; i++) {
ShenandoahFreeSetPartitionId partition = static_cast<ShenandoahFreeSetPartitionId>(i);
assert(leftmost(partition) == _max || membership(leftmost(partition)) == partition, "Leftmost boundary must be sane");
assert(rightmost(partition) == -1 || membership(rightmost(partition)) == partition, "Rightmost boundary must be sane");
assert(leftmost_empty(partition) == _max || leftmost_empty(partition) >= leftmost(partition), "Leftmost empty must be sane");
assert(rightmost_empty(partition) == -1 || rightmost_empty(partition) <= rightmost(partition), "Rightmost empty must be sane");
}
}
#endif
ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
@ -1654,6 +1666,12 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
// Not old collector alloc, so this is a young collector gclab or shared allocation
orig_partition = ShenandoahFreeSetPartitionId::Collector;
}
DEBUG_ONLY(bool boundary_changed = false;)
if ((result != nullptr) && in_new_region) {
_partitions.one_region_is_no_longer_empty(orig_partition);
DEBUG_ONLY(boundary_changed = true;)
}
if (alloc_capacity(r) < PLAB::min_size() * HeapWordSize) {
// Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
// Note that retire_from_partition() increases used to account for waste.
@ -1662,15 +1680,11 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
// then retire the region so that subsequent searches can find available memory more quickly.
size_t idx = r->index();
if ((result != nullptr) && in_new_region) {
_partitions.one_region_is_no_longer_empty(orig_partition);
}
size_t waste_bytes = _partitions.retire_from_partition(orig_partition, idx, r->used());
DEBUG_ONLY(boundary_changed = true;)
if (req.is_mutator_alloc() && (waste_bytes > 0)) {
increase_bytes_allocated(waste_bytes);
}
} else if ((result != nullptr) && in_new_region) {
_partitions.one_region_is_no_longer_empty(orig_partition);
}
switch (orig_partition) {
@ -1711,7 +1725,13 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
default:
assert(false, "won't happen");
}
_partitions.assert_bounds();
#ifdef ASSERT
if (boundary_changed) {
_partitions.assert_bounds();
} else {
_partitions.assert_bounds_sanity();
}
#endif
return result;
}

View File

@ -32,8 +32,8 @@
#include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
#include "logging/logStream.hpp"
typedef ShenandoahLock ShenandoahRebuildLock;
typedef ShenandoahLocker ShenandoahRebuildLocker;
typedef ShenandoahLock ShenandoahRebuildLock;
typedef ShenandoahLocker<ShenandoahRebuildLock> ShenandoahRebuildLocker;
// Each ShenandoahHeapRegion is associated with a ShenandoahFreeSetPartitionId.
enum class ShenandoahFreeSetPartitionId : uint8_t {
@ -402,6 +402,9 @@ public:
// idx <= rightmost
// }
void assert_bounds() NOT_DEBUG_RETURN;
// This checks certain sanity conditions on the bounds with much less effort than
// the more rigorous correctness checking done by assert_bounds().
inline void assert_bounds_sanity() NOT_DEBUG_RETURN;
};
// Publicly, ShenandoahFreeSet represents memory that is available to mutator threads. The public capacity(), used(),

View File

@ -605,7 +605,8 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x
ShenandoahOldGeneration* old_gen = old_generation();
size_t old_capacity = old_gen->max_capacity();
size_t old_usage = old_gen->used(); // includes humongous waste
size_t old_available = ((old_capacity >= old_usage)? old_capacity - old_usage: 0) + old_trashed_regions * region_size_bytes;
size_t old_currently_available =
((old_capacity >= old_usage)? old_capacity - old_usage: 0) + old_trashed_regions * region_size_bytes;
ShenandoahYoungGeneration* young_gen = young_generation();
size_t young_capacity = young_gen->max_capacity();
@ -621,7 +622,8 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x
size_t young_reserve = (young_generation()->max_capacity() * ShenandoahEvacReserve) / 100;
// If ShenandoahOldEvacPercent equals 100, max_old_reserve is limited only by mutator_xfer_limit and young_reserve
const size_t bound_on_old_reserve = ((old_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacPercent) / 100;
const size_t bound_on_old_reserve =
((old_currently_available + mutator_xfer_limit + young_reserve) * ShenandoahOldEvacPercent) / 100;
size_t proposed_max_old = ((ShenandoahOldEvacPercent == 100)?
bound_on_old_reserve:
MIN2((young_reserve * ShenandoahOldEvacPercent) / (100 - ShenandoahOldEvacPercent),
@ -631,68 +633,105 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x
}
// Decide how much old space we should reserve for a mixed collection
size_t reserve_for_mixed = 0;
size_t proposed_reserve_for_mixed = 0;
const size_t old_fragmented_available =
old_available - (old_generation()->free_unaffiliated_regions() + old_trashed_regions) * region_size_bytes;
old_currently_available - (old_generation()->free_unaffiliated_regions() + old_trashed_regions) * region_size_bytes;
if (old_fragmented_available > proposed_max_old) {
// After we've promoted regions in place, there may be an abundance of old-fragmented available memory,
// even more than the desired percentage for old reserve. We cannot transfer these fragmented regions back
// to young. Instead we make the best of the situation by using this fragmented memory for both promotions
// and evacuations.
// In this case, the old_fragmented_available is greater than the desired amount of evacuation to old.
// We'll use all of this memory to hold results of old evacuation, and we'll give back to the young generation
// any old regions that are not fragmented.
//
// This scenario may happen after we have promoted many regions in place, and each of these regions had non-zero
// unused memory, so there is now an abundance of old-fragmented available memory, even more than the desired
// percentage for old reserve. We cannot transfer these fragmented regions back to young. Instead we make the
// best of the situation by using this fragmented memory for both promotions and evacuations.
proposed_max_old = old_fragmented_available;
}
size_t reserve_for_promo = old_fragmented_available;
// Otherwise: old_fragmented_available <= proposed_max_old. Do not shrink proposed_max_old from the original computation.
// Though we initially set proposed_reserve_for_promo to equal the entirety of old fragmented available, we have the
// opportunity below to shift some of this memory into the proposed_reserve_for_mixed.
size_t proposed_reserve_for_promo = old_fragmented_available;
const size_t max_old_reserve = proposed_max_old;
const size_t mixed_candidate_live_memory = old_generation()->unprocessed_collection_candidates_live_memory();
const bool doing_mixed = (mixed_candidate_live_memory > 0);
if (doing_mixed) {
// We want this much memory to be unfragmented in order to reliably evacuate old. This is conservative because we
// may not evacuate the entirety of unprocessed candidates in a single mixed evacuation.
// In the ideal, all of the memory reserved for mixed evacuation would be unfragmented, but we don't enforce
// this. Note that the initial value of max_evac_need is conservative because we may not evacuate all of the
// remaining mixed evacuation candidates in a single cycle.
const size_t max_evac_need = (size_t) (mixed_candidate_live_memory * ShenandoahOldEvacWaste);
assert(old_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
assert(old_currently_available >= old_generation()->free_unaffiliated_regions() * region_size_bytes,
"Unaffiliated available must be less than total available");
// We prefer to evacuate all of mixed into unfragmented memory, and will expand old in order to do so, unless
// we already have too much fragmented available memory in old.
reserve_for_mixed = max_evac_need;
if (reserve_for_mixed + reserve_for_promo > max_old_reserve) {
// In this case, we'll allow old-evac to target some of the fragmented old memory.
size_t excess_reserves = (reserve_for_mixed + reserve_for_promo) - max_old_reserve;
if (reserve_for_promo > excess_reserves) {
reserve_for_promo -= excess_reserves;
proposed_reserve_for_mixed = max_evac_need;
if (proposed_reserve_for_mixed + proposed_reserve_for_promo > max_old_reserve) {
// We're trying to reserve more memory than is available. So we need to shrink our reserves.
size_t excess_reserves = (proposed_reserve_for_mixed + proposed_reserve_for_promo) - max_old_reserve;
// We need to shrink reserves by excess_reserves. We prefer to shrink by reducing promotion, giving priority to mixed
// evacuation. If the promotion reserve is larger than the amount we need to shrink by, do all the shrinkage there.
if (proposed_reserve_for_promo > excess_reserves) {
proposed_reserve_for_promo -= excess_reserves;
} else {
excess_reserves -= reserve_for_promo;
reserve_for_promo = 0;
reserve_for_mixed -= excess_reserves;
// Otherwise, we'll shrink promotion reserve to zero and we'll shrink the mixed-evac reserve by the remaining excess.
excess_reserves -= proposed_reserve_for_promo;
proposed_reserve_for_promo = 0;
proposed_reserve_for_mixed -= excess_reserves;
}
}
}
assert(proposed_reserve_for_mixed + proposed_reserve_for_promo <= max_old_reserve,
"Reserve for mixed (%zu) plus reserve for promotions (%zu) must be less than maximum old reserve (%zu)",
proposed_reserve_for_mixed, proposed_reserve_for_promo, max_old_reserve);
// Decide how much additional space we should reserve for promotions from young. We give priority to mixed evacuations
// over promotions.
const size_t promo_load = old_generation()->get_promotion_potential();
const bool doing_promotions = promo_load > 0;
if (doing_promotions) {
// We've already set aside all of the fragmented available memory within old-gen to represent old objects
// to be promoted from young generation. promo_load represents the memory that we anticipate to be promoted
// from regions that have reached tenure age. In the ideal, we will always use fragmented old-gen memory
// to hold individually promoted objects and will use unfragmented old-gen memory to represent the old-gen
// evacuation workload.
// We're promoting and have an estimate of memory to be promoted from aged regions
assert(max_old_reserve >= (reserve_for_mixed + reserve_for_promo), "Sanity");
const size_t available_for_additional_promotions = max_old_reserve - (reserve_for_mixed + reserve_for_promo);
size_t promo_need = (size_t)(promo_load * ShenandoahPromoEvacWaste);
if (promo_need > reserve_for_promo) {
reserve_for_promo += MIN2(promo_need - reserve_for_promo, available_for_additional_promotions);
// promo_load represents the combined total of live memory within regions that have reached tenure age. The true
// promotion potential is larger than this, because individual objects within regions that have not yet reached tenure
// age may be promotable. On the other hand, some of the objects that we intend to promote in the next GC cycle may
// die before they are next marked. In the future, the promo_load will include the total size of tenurable objects
// residing in regions that have not yet reached tenure age.
if (doing_promotions) {
// We are always doing promotions, even when old_generation->get_promotion_potential() returns 0. As currently implemented,
// get_promotion_potential() only knows the total live memory contained within young-generation regions whose age is
// tenurable. It does not know whether that memory will still be live at the end of the next mark cycle, and it doesn't
// know how much memory is contained within objects whose individual ages are tenurable, which reside in regions with
// non-tenurable age. We use this, as adjusted by ShenandoahPromoEvacWaste, as an approximation of the total amount of
// memory to be promoted. In the near future, we expect to implement a change that will allow get_promotion_potential()
// to account also for the total memory contained within individual objects that are tenure-ready even when they do
// not reside in aged regions. This will represent a conservative over-approximation of promotable memory because
// some of these objects may die before the next GC cycle executes.
// Be careful not to ask for too large a promotion reserve. We have observed jtreg test failures in which a greedy
// promotion reserve causes a humongous allocation that is waiting for a full GC to fail (specifically
// gc/TestAllocHumongousFragment.java). This happens if too much of the memory reclaimed by the full GC
// is immediately reserved so that it cannot be allocated by the waiting mutator. It's not clear that this
// particular test is representative of the needs of typical GenShen users. It is really a test of high frequency
// Full GCs under heap fragmentation stress.
size_t promo_need = (size_t) (promo_load * ShenandoahPromoEvacWaste);
if (promo_need > proposed_reserve_for_promo) {
const size_t available_for_additional_promotions =
max_old_reserve - (proposed_reserve_for_mixed + proposed_reserve_for_promo);
if (proposed_reserve_for_promo + available_for_additional_promotions >= promo_need) {
proposed_reserve_for_promo = promo_need;
} else {
proposed_reserve_for_promo += available_for_additional_promotions;
}
}
// We've already reserved all the memory required for the promo_load, and possibly more. The excess
// can be consumed by objects promoted from regions that have not yet reached tenure age.
}
// else, leave proposed_reserve_for_promo as is. By default, it is initialized to represent old_fragmented_available.
// This is the total old we want to reserve (initialized to the ideal reserve)
size_t old_reserve = reserve_for_mixed + reserve_for_promo;
size_t proposed_old_reserve = proposed_reserve_for_mixed + proposed_reserve_for_promo;
// We now check if the old generation is running a surplus or a deficit.
size_t old_region_deficit = 0;
@ -702,68 +741,70 @@ void ShenandoahGenerationalHeap::compute_old_generation_balance(size_t mutator_x
// align the mutator_xfer_limit on region size
mutator_xfer_limit = mutator_region_xfer_limit * region_size_bytes;
if (old_available >= old_reserve) {
if (old_currently_available >= proposed_old_reserve) {
// We are running a surplus, so the old region surplus can go to young
const size_t old_surplus = old_available - old_reserve;
const size_t old_surplus = old_currently_available - proposed_old_reserve;
old_region_surplus = old_surplus / region_size_bytes;
const size_t unaffiliated_old_regions = old_generation()->free_unaffiliated_regions() + old_trashed_regions;
old_region_surplus = MIN2(old_region_surplus, unaffiliated_old_regions);
old_generation()->set_region_balance(checked_cast<ssize_t>(old_region_surplus));
} else if (old_available + mutator_xfer_limit >= old_reserve) {
// Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there
size_t old_deficit = old_reserve - old_available;
old_currently_available -= old_region_surplus * region_size_bytes;
young_available += old_region_surplus * region_size_bytes;
} else if (old_currently_available + mutator_xfer_limit >= proposed_old_reserve) {
// We know that old_currently_available < proposed_old_reserve because the test above failed. Expand old_currently_available.
// Mutator's xfer limit is sufficient to satisfy our need: transfer all memory from there.
size_t old_deficit = proposed_old_reserve - old_currently_available;
old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
old_currently_available += old_region_deficit * region_size_bytes;
young_available -= old_region_deficit * region_size_bytes;
} else {
// We'll try to xfer from both mutator excess and from young collector reserve
size_t available_reserves = old_available + young_reserve + mutator_xfer_limit;
size_t old_entitlement = (available_reserves * ShenandoahOldEvacPercent) / 100;
// We know that (old_currently_available < proposed_old_reserve) and
// (old_currently_available + mutator_xfer_limit < proposed_old_reserve) because the tests above failed.
// We need to shrink proposed_old_reserve.
// Round old_entitlement down to nearest multiple of regions to be transferred to old
size_t entitled_xfer = old_entitlement - old_available;
entitled_xfer = region_size_bytes * (entitled_xfer / region_size_bytes);
size_t unaffiliated_young_regions = young_generation()->free_unaffiliated_regions();
size_t unaffiliated_young_memory = unaffiliated_young_regions * region_size_bytes;
if (entitled_xfer > unaffiliated_young_memory) {
entitled_xfer = unaffiliated_young_memory;
}
old_entitlement = old_available + entitled_xfer;
if (old_entitlement < old_reserve) {
// There's not enough memory to satisfy our desire. Scale back our old-gen intentions.
size_t budget_overrun = old_reserve - old_entitlement;
if (reserve_for_promo > budget_overrun) {
reserve_for_promo -= budget_overrun;
old_reserve -= budget_overrun;
} else {
budget_overrun -= reserve_for_promo;
reserve_for_promo = 0;
reserve_for_mixed = (reserve_for_mixed > budget_overrun)? reserve_for_mixed - budget_overrun: 0;
old_reserve = reserve_for_promo + reserve_for_mixed;
}
}
// We could potentially shrink young_reserve in order to further expand proposed_old_reserve. Let's not bother. The
// important thing is that we keep a total amount of memory in reserve in preparation for the next GC cycle. At
// the time we choose the next collection set, we'll have an opportunity to shift some of these young reserves
// into old reserves if that makes sense.
// Because of adjustments above, old_reserve may be smaller now than it was when we tested the branch
// condition above: "(old_available + mutator_xfer_limit >= old_reserve)
// Therefore, we do NOT know that: mutator_xfer_limit < old_reserve - old_available
size_t old_deficit = old_reserve - old_available;
old_region_deficit = (old_deficit + region_size_bytes - 1) / region_size_bytes;
// Shrink young_reserve to account for loan to old reserve
const size_t reserve_xfer_regions = old_region_deficit - mutator_region_xfer_limit;
young_reserve -= reserve_xfer_regions * region_size_bytes;
// Start by taking all of mutator_xfer_limit into old_currently_available.
size_t old_region_deficit = mutator_region_xfer_limit;
old_generation()->set_region_balance(0 - checked_cast<ssize_t>(old_region_deficit));
old_currently_available += old_region_deficit * region_size_bytes;
young_available -= old_region_deficit * region_size_bytes;
assert(old_currently_available < proposed_old_reserve,
"Old currently available (%zu) must be less than old reserve (%zu)", old_currently_available, proposed_old_reserve);
// There's not enough memory to satisfy our desire. Scale back our old-gen intentions. We prefer to satisfy
// the budget_overrun entirely from the promotion reserve, if that is large enough. Otherwise, we'll satisfy
// the overrun from a combination of promotion and mixed-evacuation reserves.
size_t budget_overrun = proposed_old_reserve - old_currently_available;
if (proposed_reserve_for_promo > budget_overrun) {
proposed_reserve_for_promo -= budget_overrun;
// Dead code:
// proposed_old_reserve -= budget_overrun;
} else {
budget_overrun -= proposed_reserve_for_promo;
proposed_reserve_for_promo = 0;
proposed_reserve_for_mixed = (proposed_reserve_for_mixed > budget_overrun)? proposed_reserve_for_mixed - budget_overrun: 0;
// Dead code:
// Note: proposed_reserve_for_promo is 0 and proposed_reserve_for_mixed may equal 0.
// proposed_old_reserve = proposed_reserve_for_mixed;
}
}
assert(old_region_deficit == 0 || old_region_surplus == 0, "Only surplus or deficit, never both");
assert(young_reserve + reserve_for_mixed + reserve_for_promo <= old_available + young_available,
assert(old_region_deficit == 0 || old_region_surplus == 0,
"Only surplus (%zu) or deficit (%zu), never both", old_region_surplus, old_region_deficit);
assert(young_reserve + proposed_reserve_for_mixed + proposed_reserve_for_promo <= old_currently_available + young_available,
"Cannot reserve more memory than is available: %zu + %zu + %zu <= %zu + %zu",
young_reserve, reserve_for_mixed, reserve_for_promo, old_available, young_available);
young_reserve, proposed_reserve_for_mixed, proposed_reserve_for_promo, old_currently_available, young_available);
// deficit/surplus adjustments to generation sizes will precede rebuild
young_generation()->set_evacuation_reserve(young_reserve);
old_generation()->set_evacuation_reserve(reserve_for_mixed);
old_generation()->set_promoted_reserve(reserve_for_promo);
old_generation()->set_evacuation_reserve(proposed_reserve_for_mixed);
old_generation()->set_promoted_reserve(proposed_reserve_for_promo);
}
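A worked example of the sizing arithmetic above, with illustrative unit-free numbers (not taken from any test): suppose young_reserve = 100, mutator_xfer_limit = 20, old_currently_available = 40 and ShenandoahOldEvacPercent = 30:

// Illustrative only; mirrors the formulas near the top of compute_old_generation_balance.
size_t young_reserve = 100, mutator_xfer_limit = 20, old_currently_available = 40;
size_t bound_on_old_reserve =
  ((old_currently_available + mutator_xfer_limit + young_reserve) * 30) / 100; // (160 * 30) / 100 = 48
size_t proposed_max_old =
  MIN2((young_reserve * 30) / (100 - 30), bound_on_old_reserve);               // MIN2(42, 48) = 42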
void ShenandoahGenerationalHeap::coalesce_and_fill_old_regions(bool concurrent) {

View File

@ -2834,3 +2834,13 @@ void ShenandoahHeap::log_heap_status(const char* msg) const {
global_generation()->log_status(msg);
}
}
ShenandoahHeapLocker::ShenandoahHeapLocker(ShenandoahHeapLock* lock, bool allow_block_for_safepoint) : _lock(lock) {
#ifdef ASSERT
ShenandoahFreeSet* free_set = ShenandoahHeap::heap()->free_set();
// free_set is nullptr only in the pre-initialized state
assert(free_set == nullptr || !free_set->rebuild_lock()->owned_by_self(), "Deadlock: can't acquire heap lock while holding free-set rebuild lock");
assert(_lock != nullptr, "Must not");
#endif
_lock->lock(allow_block_for_safepoint);
}

View File

@ -117,9 +117,23 @@ public:
virtual bool is_thread_safe() { return false; }
};
typedef ShenandoahLock ShenandoahHeapLock;
typedef ShenandoahLocker ShenandoahHeapLocker;
typedef Stack<oop, mtGC> ShenandoahScanObjectStack;
typedef ShenandoahLock ShenandoahHeapLock;
// ShenandoahHeapLocker implements a locker that ensures mutually exclusive access to the global heap data structures.
// Asserts in the implementation detect potential deadlocks involving the rebuild lock that is present
// in ShenandoahFreeSet. Whenever both locks are acquired, this lock must be acquired before the
// ShenandoahFreeSet rebuild lock.
class ShenandoahHeapLocker : public StackObj {
private:
ShenandoahHeapLock* _lock;
public:
ShenandoahHeapLocker(ShenandoahHeapLock* lock, bool allow_block_for_safepoint = false);
~ShenandoahHeapLocker() {
_lock->unlock();
}
};
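A hedged sketch of the documented ordering, using the free_set()->rebuild_lock() accessor that appears in this commit and assuming local variables heap and free_set plus the usual ShenandoahHeap::lock() accessor:

{
  ShenandoahHeapLocker heap_locker(heap->lock());                    // outer lock, taken first
  ShenandoahRebuildLocker rebuild_locker(free_set->rebuild_lock());  // inner lock
  // ... update free-set structures under both locks ...
}                                                                    // inner unlocks before outer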
typedef Stack<oop, mtGC> ShenandoahScanObjectStack;
// Shenandoah GC is a low-pause concurrent GC that uses a load reference barrier
// for concurrent evacuation and a snapshot-at-the-beginning write barrier for

View File

@ -93,7 +93,7 @@ ShenandoahSimpleLock::ShenandoahSimpleLock() {
assert(os::mutex_init_done(), "Too early!");
}
void ShenandoahSimpleLock::lock() {
void ShenandoahSimpleLock::lock(bool allow_block_for_safepoint) {
_lock.lock();
}
@ -101,28 +101,31 @@ void ShenandoahSimpleLock::unlock() {
_lock.unlock();
}
ShenandoahReentrantLock::ShenandoahReentrantLock() :
ShenandoahSimpleLock(), _owner(nullptr), _count(0) {
assert(os::mutex_init_done(), "Too early!");
template<typename Lock>
ShenandoahReentrantLock<Lock>::ShenandoahReentrantLock() :
Lock(), _owner(nullptr), _count(0) {
}
ShenandoahReentrantLock::~ShenandoahReentrantLock() {
template<typename Lock>
ShenandoahReentrantLock<Lock>::~ShenandoahReentrantLock() {
assert(_count == 0, "Unbalance");
}
void ShenandoahReentrantLock::lock() {
template<typename Lock>
void ShenandoahReentrantLock<Lock>::lock(bool allow_block_for_safepoint) {
Thread* const thread = Thread::current();
Thread* const owner = _owner.load_relaxed();
if (owner != thread) {
ShenandoahSimpleLock::lock();
Lock::lock(allow_block_for_safepoint);
_owner.store_relaxed(thread);
}
_count++;
}
void ShenandoahReentrantLock::unlock() {
template<typename Lock>
void ShenandoahReentrantLock<Lock>::unlock() {
assert(owned_by_self(), "Invalid owner");
assert(_count > 0, "Invalid count");
@ -130,12 +133,17 @@ void ShenandoahReentrantLock::unlock() {
if (_count == 0) {
_owner.store_relaxed((Thread*)nullptr);
ShenandoahSimpleLock::unlock();
Lock::unlock();
}
}
bool ShenandoahReentrantLock::owned_by_self() const {
template<typename Lock>
bool ShenandoahReentrantLock<Lock>::owned_by_self() const {
Thread* const thread = Thread::current();
Thread* const owner = _owner.load_relaxed();
return owner == thread;
}
// Explicit template instantiation
template class ShenandoahReentrantLock<ShenandoahSimpleLock>;
template class ShenandoahReentrantLock<ShenandoahLock>;
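The reentrancy contract is unchanged by the templating: only the outermost lock()/unlock() pair touches the underlying Lock; nested acquires by the owning thread just adjust _count. A hedged usage sketch:

ShenandoahReentrantLock<ShenandoahSimpleLock> lock;

void inner() {
  lock.lock();    // same owner, so no blocking; _count == 2
  lock.unlock();  // _count == 1
}

void outer() {
  lock.lock();    // acquires the underlying lock; _count == 1
  inner();
  lock.unlock();  // _count drops to 0; underlying lock released
}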

View File

@ -31,7 +31,7 @@
#include "runtime/javaThread.hpp"
#include "runtime/safepoint.hpp"
class ShenandoahLock {
class ShenandoahLock {
private:
enum LockState { unlocked = 0, locked = 1 };
@ -48,7 +48,7 @@ private:
public:
ShenandoahLock() : _state(unlocked), _owner(nullptr) {};
void lock(bool allow_block_for_safepoint) {
void lock(bool allow_block_for_safepoint = false) {
assert(_owner.load_relaxed() != Thread::current(), "reentrant locking attempt, would deadlock");
if ((allow_block_for_safepoint && SafepointSynchronize::is_synchronizing()) ||
@ -83,34 +83,19 @@ public:
}
};
class ShenandoahLocker : public StackObj {
private:
ShenandoahLock* const _lock;
public:
ShenandoahLocker(ShenandoahLock* lock, bool allow_block_for_safepoint = false) : _lock(lock) {
if (_lock != nullptr) {
_lock->lock(allow_block_for_safepoint);
}
}
~ShenandoahLocker() {
if (_lock != nullptr) {
_lock->unlock();
}
}
};
// Simple lock using PlatformMonitor
class ShenandoahSimpleLock {
private:
PlatformMonitor _lock; // native lock
public:
ShenandoahSimpleLock();
virtual void lock();
virtual void unlock();
void lock(bool allow_block_for_safepoint = false);
void unlock();
};
class ShenandoahReentrantLock : public ShenandoahSimpleLock {
// templated reentrant lock
template<typename Lock>
class ShenandoahReentrantLock : public Lock {
private:
Atomic<Thread*> _owner;
uint64_t _count;
@ -119,30 +104,25 @@ public:
ShenandoahReentrantLock();
~ShenandoahReentrantLock();
virtual void lock();
virtual void unlock();
void lock(bool allow_block_for_safepoint = false);
void unlock();
// If the lock already owned by this thread
bool owned_by_self() const;
};
class ShenandoahReentrantLocker : public StackObj {
private:
ShenandoahReentrantLock* const _lock;
// template-based ShenandoahLocker
template<typename Lock>
class ShenandoahLocker : public StackObj {
Lock* const _lock;
public:
ShenandoahReentrantLocker(ShenandoahReentrantLock* lock) :
_lock(lock) {
if (_lock != nullptr) {
_lock->lock();
}
ShenandoahLocker(Lock* lock, bool allow_block_for_safepoint = false) : _lock(lock) {
assert(_lock != nullptr, "Must not");
_lock->lock(allow_block_for_safepoint);
}
~ShenandoahReentrantLocker() {
if (_lock != nullptr) {
assert(_lock->owned_by_self(), "Must be owner");
_lock->unlock();
}
~ShenandoahLocker() {
_lock->unlock();
}
};
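Usage is unchanged apart from the template parameter; for instance, the ShenandoahRebuildLocker typedef added in shenandoahFreeSet.hpp gives a scoped guard:

{
  ShenandoahRebuildLocker locker(rebuild_lock);  // asserts non-null, then locks
  // ... critical section ...
}                                                // unlocked on scope exit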

View File

@ -241,7 +241,7 @@ void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
assert(nm == data->nm(), "Must be same nmethod");
// Prevent updating a nmethod while concurrent iteration is in progress.
wait_until_concurrent_iteration_done();
ShenandoahReentrantLocker data_locker(data->lock());
ShenandoahNMethodLocker data_locker(data->lock());
data->update();
} else {
// For a new nmethod, we can safely append it to the list, because

View File

@ -33,6 +33,10 @@
#include "runtime/atomic.hpp"
#include "utilities/growableArray.hpp"
// Use ShenandoahReentrantLock<ShenandoahSimpleLock> as ShenandoahNMethodLock
typedef ShenandoahReentrantLock<ShenandoahSimpleLock> ShenandoahNMethodLock;
typedef ShenandoahLocker<ShenandoahNMethodLock> ShenandoahNMethodLocker;
// ShenandoahNMethod tuple records the internal locations of oop slots within the relocation stream in
// the nmethod. This allows us to quickly scan the oops without doing the nmethod-internal scans,
// which sometimes involve parsing the machine code. Note it does not record the oops themselves,
@ -44,16 +48,16 @@ private:
int _oops_count;
bool _has_non_immed_oops;
bool _unregistered;
ShenandoahReentrantLock _lock;
ShenandoahReentrantLock _ic_lock;
ShenandoahNMethodLock _lock;
ShenandoahNMethodLock _ic_lock;
public:
ShenandoahNMethod(nmethod *nm, GrowableArray<oop*>& oops, bool has_non_immed_oops);
~ShenandoahNMethod();
inline nmethod* nm() const;
inline ShenandoahReentrantLock* lock();
inline ShenandoahReentrantLock* ic_lock();
inline ShenandoahNMethodLock* lock();
inline ShenandoahNMethodLock* ic_lock();
inline void oops_do(OopClosure* oops, bool fix_relocations = false);
// Update oops when the nmethod is re-registered
void update();
@ -61,8 +65,8 @@ public:
inline bool is_unregistered() const;
static ShenandoahNMethod* for_nmethod(nmethod* nm);
static inline ShenandoahReentrantLock* lock_for_nmethod(nmethod* nm);
static inline ShenandoahReentrantLock* ic_lock_for_nmethod(nmethod* nm);
static inline ShenandoahNMethodLock* lock_for_nmethod(nmethod* nm);
static inline ShenandoahNMethodLock* ic_lock_for_nmethod(nmethod* nm);
static void heal_nmethod(nmethod* nm);
static inline void heal_nmethod_metadata(ShenandoahNMethod* nmethod_data);

View File

@ -35,11 +35,11 @@ nmethod* ShenandoahNMethod::nm() const {
return _nm;
}
ShenandoahReentrantLock* ShenandoahNMethod::lock() {
ShenandoahNMethodLock* ShenandoahNMethod::lock() {
return &_lock;
}
ShenandoahReentrantLock* ShenandoahNMethod::ic_lock() {
ShenandoahNMethodLock* ShenandoahNMethod::ic_lock() {
return &_ic_lock;
}
@ -85,11 +85,11 @@ void ShenandoahNMethod::attach_gc_data(nmethod* nm, ShenandoahNMethod* gc_data)
nm->set_gc_data<ShenandoahNMethod>(gc_data);
}
ShenandoahReentrantLock* ShenandoahNMethod::lock_for_nmethod(nmethod* nm) {
ShenandoahNMethodLock* ShenandoahNMethod::lock_for_nmethod(nmethod* nm) {
return gc_data(nm)->lock();
}
ShenandoahReentrantLock* ShenandoahNMethod::ic_lock_for_nmethod(nmethod* nm) {
ShenandoahNMethodLock* ShenandoahNMethod::ic_lock_for_nmethod(nmethod* nm) {
return gc_data(nm)->ic_lock();
}

View File

@ -504,7 +504,7 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc
if (!CompressedOops::is_null(*list)) {
oop head = lrb(CompressedOops::decode_not_null(*list));
shenandoah_assert_not_in_cset_except(&head, head, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
oop prev = AtomicAccess::xchg(&_pending_list, head);
oop prev = _pending_list.exchange(head);
set_oop_field(p, prev);
if (prev == nullptr) {
// First to prepend to list, record tail
@ -519,14 +519,14 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahRefProcThreadLoc
void ShenandoahReferenceProcessor::work() {
// Process discovered references
uint max_workers = ShenandoahHeap::heap()->max_workers();
uint worker_id = AtomicAccess::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
uint worker_id = _iterate_discovered_list_id.fetch_then_add(1U, memory_order_relaxed);
while (worker_id < max_workers) {
if (UseCompressedOops) {
process_references<narrowOop>(_ref_proc_thread_locals[worker_id], worker_id);
} else {
process_references<oop>(_ref_proc_thread_locals[worker_id], worker_id);
}
worker_id = AtomicAccess::add(&_iterate_discovered_list_id, 1U, memory_order_relaxed) - 1;
worker_id = _iterate_discovered_list_id.fetch_then_add(1U, memory_order_relaxed);
}
}
@ -559,7 +559,7 @@ public:
void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Phase phase, WorkerThreads* workers, bool concurrent) {
AtomicAccess::release_store_fence(&_iterate_discovered_list_id, 0U);
_iterate_discovered_list_id.release_store_fence(0U);
// Process discovered lists
ShenandoahReferenceProcessorTask task(phase, concurrent, this);
@ -576,7 +576,7 @@ void ShenandoahReferenceProcessor::process_references(ShenandoahPhaseTimings::Ph
void ShenandoahReferenceProcessor::enqueue_references_locked() {
// Prepend internal pending list to external pending list
shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list, ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
shenandoah_assert_not_in_cset_except(&_pending_list, _pending_list.load_relaxed(), ShenandoahHeap::heap()->cancelled_gc() || !ShenandoahLoadRefBarrier);
// During reference processing, we maintain a local list of references that are identified by
// _pending_list and _pending_list_tail. _pending_list_tail points to the next field of the last Reference object on
@ -589,7 +589,7 @@ void ShenandoahReferenceProcessor::enqueue_references_locked() {
// 2. Overwriting the next field of the last Reference on my local list to point at the previous head of the
// global Universe::_reference_pending_list
oop former_head_of_global_list = Universe::swap_reference_pending_list(_pending_list);
oop former_head_of_global_list = Universe::swap_reference_pending_list(_pending_list.load_relaxed());
if (UseCompressedOops) {
set_oop_field<narrowOop>(reinterpret_cast<narrowOop*>(_pending_list_tail), former_head_of_global_list);
} else {
@ -598,7 +598,7 @@ void ShenandoahReferenceProcessor::enqueue_references_locked() {
}
void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
if (_pending_list == nullptr) {
if (_pending_list.load_relaxed() == nullptr) {
// Nothing to enqueue
return;
}
@ -616,7 +616,7 @@ void ShenandoahReferenceProcessor::enqueue_references(bool concurrent) {
}
// Reset internal pending list
_pending_list = nullptr;
_pending_list.store_relaxed(nullptr);
_pending_list_tail = &_pending_list;
}
@ -640,9 +640,9 @@ void ShenandoahReferenceProcessor::abandon_partial_discovery() {
clean_discovered_list<oop>(_ref_proc_thread_locals[index].discovered_list_addr<oop>());
}
}
if (_pending_list != nullptr) {
oop pending = _pending_list;
_pending_list = nullptr;
if (_pending_list.load_relaxed() != nullptr) {
oop pending = _pending_list.load_relaxed();
_pending_list.store_relaxed(nullptr);
if (UseCompressedOops) {
narrowOop* list = reference_discovered_addr<narrowOop>(pending);
clean_discovered_list<narrowOop>(list);

View File

@ -31,6 +31,7 @@
#include "gc/shared/referenceProcessorStats.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
class ShenandoahMarkRefsSuperClosure;
class WorkerThreads;
@ -133,10 +134,10 @@ private:
ShenandoahRefProcThreadLocal* _ref_proc_thread_locals;
oop _pending_list;
Atomic<oop> _pending_list;
void* _pending_list_tail; // T*
volatile uint _iterate_discovered_list_id;
Atomic<uint> _iterate_discovered_list_id;
ReferenceProcessorStats _stats;

View File

@ -80,7 +80,7 @@ public:
virtual bool has_dead_oop(nmethod* nm) const {
assert(ShenandoahHeap::heap()->is_concurrent_weak_root_in_progress(), "Only for this phase");
ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
ShenandoahReentrantLocker locker(data->lock());
ShenandoahNMethodLocker locker(data->lock());
ShenandoahIsUnloadingOopClosure cl;
data->oops_do(&cl);
return cl.is_unloading();
@ -90,14 +90,14 @@ public:
class ShenandoahCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour {
public:
virtual bool lock(nmethod* nm) {
ShenandoahReentrantLock* const lock = ShenandoahNMethod::ic_lock_for_nmethod(nm);
ShenandoahNMethodLock* const lock = ShenandoahNMethod::ic_lock_for_nmethod(nm);
assert(lock != nullptr, "Not yet registered?");
lock->lock();
return true;
}
virtual void unlock(nmethod* nm) {
ShenandoahReentrantLock* const lock = ShenandoahNMethod::ic_lock_for_nmethod(nm);
ShenandoahNMethodLock* const lock = ShenandoahNMethod::ic_lock_for_nmethod(nm);
assert(lock != nullptr, "Not yet registered?");
lock->unlock();
}
@ -107,7 +107,7 @@ public:
return true;
}
ShenandoahReentrantLock* const lock = ShenandoahNMethod::ic_lock_for_nmethod(nm);
ShenandoahNMethodLock* const lock = ShenandoahNMethod::ic_lock_for_nmethod(nm);
assert(lock != nullptr, "Not yet registered?");
return lock->owned_by_self();
}

View File

@ -196,7 +196,7 @@ class ConstantPoolCache: public MetaspaceObj {
#endif
public:
static int size() { return align_metadata_size(sizeof(ConstantPoolCache) / wordSize); }
static int size() { return align_metadata_size(sizeof_auto(ConstantPoolCache) / wordSize); }
private:
// Helpers

View File

@ -360,6 +360,17 @@ Node* IdealKit::load(Node* ctl,
return transform(ld);
}
// Load AOT runtime constant
Node* IdealKit::load_aot_const(Node* adr, const Type* t) {
BasicType bt = t->basic_type();
const TypePtr* adr_type = nullptr; // debug-mode-only argument
DEBUG_ONLY(adr_type = C->get_adr_type(Compile::AliasIdxRaw));
Node* ctl = (Node*)C->root(); // Raw memory access needs control
Node* ld = LoadNode::make(_gvn, ctl, C->immutable_memory(), adr, adr_type, t, bt, MemNode::unordered,
LoadNode::DependsOnlyOnTest, false, false, false, false, 0);
return transform(ld);
}
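A hedged sketch of a call site; the kit, the address variable, and its residence inside AOTRuntimeConstants are assumptions, not part of this change:

// Hypothetical sketch: load a jlong-sized AOT runtime constant through raw
// memory, pinned to the root control as load_aot_const does internally.
// 'aotrc_field' is assumed to be an address inside AOTRuntimeConstants.
Node* addr  = kit.makecon(TypeRawPtr::make(aotrc_field));
Node* value = kit.load_aot_const(addr, TypeLong::LONG);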
Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
int adr_idx,
MemNode::MemOrd mo, bool require_atomic_access,

View File

@ -224,6 +224,9 @@ class IdealKit: public StackObj {
MemNode::MemOrd mo = MemNode::unordered,
LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest);
// Load AOT runtime constant
Node* load_aot_const(Node* adr, const Type* t);
// Return the new StoreXNode
Node* store(Node* ctl,
Node* adr,

View File

@ -1228,8 +1228,13 @@ Node* MemNode::can_see_stored_value(Node* st, PhaseValues* phase) const {
}
// LoadVector/StoreVector needs additional check to ensure the types match.
if (st->is_StoreVector()) {
const TypeVect* in_vt = st->as_StoreVector()->vect_type();
const TypeVect* out_vt = as_LoadVector()->vect_type();
if ((Opcode() != Op_LoadVector && Opcode() != Op_StoreVector) || st->Opcode() != Op_StoreVector) {
// Some kind of masked access or gather/scatter
return nullptr;
}
const TypeVect* in_vt = st->as_StoreVector()->vect_type();
const TypeVect* out_vt = is_Load() ? as_LoadVector()->vect_type() : as_StoreVector()->vect_type();
if (in_vt != out_vt) {
return nullptr;
}
@ -3567,8 +3572,11 @@ Node* StoreNode::Identity(PhaseGVN* phase) {
val->in(MemNode::Address)->eqv_uncast(adr) &&
val->in(MemNode::Memory )->eqv_uncast(mem) &&
val->as_Load()->store_Opcode() == Opcode()) {
// Ensure vector type is the same
if (!is_StoreVector() || (mem->is_LoadVector() && as_StoreVector()->vect_type() == mem->as_LoadVector()->vect_type())) {
if (!is_StoreVector()) {
result = mem;
} else if (Opcode() == Op_StoreVector && val->Opcode() == Op_LoadVector &&
as_StoreVector()->vect_type() == val->as_LoadVector()->vect_type()) {
// Ensure both are not masked accesses or gathers/scatters and vector types are the same
result = mem;
}
}

View File

@ -2214,7 +2214,7 @@ bool SuperWord::is_vector_use(Node* use, int u_idx) const {
return true;
}
if (!is_velt_basic_type_compatible_use_def(use, def)) {
if (!is_velt_basic_type_compatible_use_def(use, def, d_pk->size())) {
return false;
}
@ -2280,7 +2280,7 @@ Node_List* PackSet::strided_pack_input_at_index_or_null(const Node_List* pack, c
// Check if the output type of def is compatible with the input type of use, i.e. if the
// types have the same size or the platform supports a subword cast between them.
bool SuperWord::is_velt_basic_type_compatible_use_def(Node* use, Node* def) const {
bool SuperWord::is_velt_basic_type_compatible_use_def(Node* use, Node* def, const uint pack_size) const {
assert(in_bb(def) && in_bb(use), "both use and def are in loop");
// Conversions are trivially compatible.
@ -2306,8 +2306,17 @@ bool SuperWord::is_velt_basic_type_compatible_use_def(Node* use, Node* def) cons
type2aelembytes(use_bt) == 4;
}
// Default case: input size of use equals output size of def.
return type2aelembytes(use_bt) == type2aelembytes(def_bt);
// Input size of use equals output size of def
if (type2aelembytes(use_bt) == type2aelembytes(def_bt)) {
return true;
}
// Subword cast: Element sizes differ, but the platform supports a cast to change the def shape to the use shape.
if (VectorCastNode::is_supported_subword_cast(def_bt, use_bt, pack_size)) {
return true;
}
return false;
}
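A concrete instance of the relaxed rule (values are illustrative only):

// Hypothetical sketch: def produces ints (4 bytes), use consumes shorts
// (2 bytes). The old same-size rule rejected the pair; it is now accepted
// when the platform implements the matching vector cast for the pack size.
bool compatible = VectorCastNode::is_supported_subword_cast(T_INT, T_SHORT, /*pack_size*/ 8);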
// Return nullptr if success, else failure message

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -653,7 +653,7 @@ private:
// Is use->in(u_idx) a vector use?
bool is_vector_use(Node* use, int u_idx) const;
bool is_velt_basic_type_compatible_use_def(Node* use, Node* def) const;
bool is_velt_basic_type_compatible_use_def(Node* use, Node* def, const uint pack_size) const;
bool do_vtransform() const;
};

View File

@ -254,6 +254,20 @@ VTransformNode* SuperWordVTransformBuilder::get_or_make_vtnode_vector_input_at_i
Node_List* pack_in = _packset.pack_input_at_index_or_null(pack, index);
if (pack_in != nullptr) {
Node* in_p0 = pack_in->at(0);
BasicType def_bt = _vloop_analyzer.types().velt_basic_type(in_p0);
BasicType use_bt = _vloop_analyzer.types().velt_basic_type(p0);
// If the use and def types are different, emit a cast node
if (use_bt != def_bt && !p0->is_Convert() && VectorCastNode::is_supported_subword_cast(def_bt, use_bt, pack->size())) {
VTransformNode* in = get_vtnode(pack_in->at(0));
const VTransformVectorNodeProperties properties = VTransformVectorNodeProperties::make_from_pack(pack, _vloop_analyzer);
VTransformNode* cast = new (_vtransform.arena()) VTransformElementWiseVectorNode(_vtransform, 2, properties, VectorCastNode::opcode(-1, def_bt));
cast->set_req(1, in);
return cast;
}
// Input is a matching pack -> vtnode already exists.
assert(index != 2 || !VectorNode::is_shift(p0), "shift's count cannot be vector");
return get_vtnode(pack_in->at(0));

View File

@ -97,7 +97,7 @@ const Type::TypeInfo Type::_type_info[Type::lastype] = {
{ Bad, T_ILLEGAL, "vectorz:", false, Op_VecZ, relocInfo::none }, // VectorZ
#endif
{ Bad, T_ADDRESS, "anyptr:", false, Op_RegP, relocInfo::none }, // AnyPtr
{ Bad, T_ADDRESS, "rawptr:", false, Op_RegP, relocInfo::none }, // RawPtr
{ Bad, T_ADDRESS, "rawptr:", false, Op_RegP, relocInfo::external_word_type }, // RawPtr
{ Bad, T_OBJECT, "oop:", true, Op_RegP, relocInfo::oop_type }, // OopPtr
{ Bad, T_OBJECT, "inst:", true, Op_RegP, relocInfo::oop_type }, // InstPtr
{ Bad, T_OBJECT, "ary:", true, Op_RegP, relocInfo::oop_type }, // AryPtr

View File

@ -1561,6 +1561,13 @@ bool VectorCastNode::implemented(int opc, uint vlen, BasicType src_type, BasicTy
return false;
}
bool VectorCastNode::is_supported_subword_cast(BasicType def_bt, BasicType use_bt, const uint pack_size) {
assert(def_bt != use_bt, "use and def types must be different");
// Opcode is only required to disambiguate half float, so we pass -1 as it can't be encountered here.
return (is_subword_type(def_bt) || is_subword_type(use_bt)) && VectorCastNode::implemented(-1, pack_size, def_bt, use_bt);
}
Node* VectorCastNode::Identity(PhaseGVN* phase) {
if (!in(1)->is_top()) {
BasicType in_bt = in(1)->bottom_type()->is_vect()->element_basic_type();

View File

@ -1846,6 +1846,7 @@ class VectorCastNode : public VectorNode {
static VectorNode* make(int vopc, Node* n1, BasicType bt, uint vlen);
static int opcode(int opc, BasicType bt, bool is_signed = true);
static bool implemented(int opc, uint vlen, BasicType src_type, BasicType dst_type);
static bool is_supported_subword_cast(BasicType def_bt, BasicType use_bt, const uint pack_size);
virtual Node* Identity(PhaseGVN* phase);
};

View File

@ -975,4 +975,5 @@ public:
virtual VTransformApplyResult apply(VTransformApplyState& apply_state) const override;
NOT_PRODUCT(virtual const char* name() const override { return "StoreVector"; };)
};
#endif // SHARE_OPTO_VTRANSFORM_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -2491,7 +2491,7 @@ SetOrClearFramePopClosure::do_thread(Thread *target) {
_result = JVMTI_ERROR_NO_MORE_FRAMES;
return;
}
assert(_state->get_thread_or_saved() == java_thread, "Must be");
assert(_state->get_thread() == java_thread, "Must be");
RegisterMap reg_map(java_thread,
RegisterMap::UpdateMap::include,

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -151,11 +151,6 @@ bool JvmtiEnvThreadState::is_virtual() {
return _state->is_virtual();
}
// Use _thread_saved if cthread is detached from JavaThread (_thread == nullptr).
JavaThread* JvmtiEnvThreadState::get_thread_or_saved() {
return _state->get_thread_or_saved();
}
JavaThread* JvmtiEnvThreadState::get_thread() {
return _state->get_thread();
}
@ -344,7 +339,7 @@ void JvmtiEnvThreadState::reset_current_location(jvmtiEvent event_type, bool ena
if (enabled) {
// If enabling breakpoint, no need to reset.
// Can't do anything if empty stack.
JavaThread* thread = get_thread_or_saved();
JavaThread* thread = get_thread();
if (event_type == JVMTI_EVENT_SINGLE_STEP &&
((thread == nullptr && is_virtual()) || thread->has_last_Java_frame())) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -170,8 +170,6 @@ public:
inline JvmtiThreadState* jvmti_thread_state() { return _state; }
// use _thread_saved if cthread is detached from JavaThread
JavaThread *get_thread_or_saved();
JavaThread *get_thread();
inline JvmtiEnv *get_env() { return _env; }

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -217,6 +217,10 @@ class EnterInterpOnlyModeClosure : public HandshakeClosure {
assert(state != nullptr, "sanity check");
assert(state->get_thread() == jt, "handshake unsafe conditions");
assert(jt->jvmti_thread_state() == state, "sanity check");
assert(!jt->is_interp_only_mode(), "sanity check");
assert(!state->is_interp_only_mode(), "sanity check");
if (!state->is_pending_interp_only_mode()) {
_completed = true;
return; // The pending flag has been already cleared, so bail out.
@ -361,7 +365,8 @@ void VM_ChangeSingleStep::doit() {
void JvmtiEventControllerPrivate::enter_interp_only_mode(JvmtiThreadState *state) {
EC_TRACE(("[%s] # Entering interpreter only mode",
JvmtiTrace::safe_get_thread_name(state->get_thread_or_saved())));
JvmtiTrace::safe_get_thread_name(state->get_thread())));
JavaThread *target = state->get_thread();
Thread *current = Thread::current();
@ -371,8 +376,13 @@ void JvmtiEventControllerPrivate::enter_interp_only_mode(JvmtiThreadState *state
}
// This flag will be cleared in EnterInterpOnlyModeClosure handshake.
state->set_pending_interp_only_mode(true);
if (target == nullptr) { // an unmounted virtual thread
return; // EnterInterpOnlyModeClosure will be executed right after mount.
// There are two cases when entering interp_only_mode is postponed:
// 1. Unmounted virtual thread - EnterInterpOnlyModeClosure::do_thread will be executed at mount;
// 2. Carrier thread with mounted virtual thread - EnterInterpOnlyModeClosure::do_thread will be executed at unmount.
if (target == nullptr || // an unmounted virtual thread
JvmtiEnvBase::is_thread_carrying_vthread(target, state->get_thread_oop())) { // a vthread carrying thread
return; // EnterInterpOnlyModeClosure will be executed right after mount or unmount.
}
EnterInterpOnlyModeClosure hs(state);
if (target->is_handshake_safe_for(current)) {
@ -388,7 +398,8 @@ void JvmtiEventControllerPrivate::enter_interp_only_mode(JvmtiThreadState *state
void
JvmtiEventControllerPrivate::leave_interp_only_mode(JvmtiThreadState *state) {
EC_TRACE(("[%s] # Leaving interpreter only mode",
JvmtiTrace::safe_get_thread_name(state->get_thread_or_saved())));
JvmtiTrace::safe_get_thread_name(state->get_thread())));
if (state->is_pending_interp_only_mode()) {
state->set_pending_interp_only_mode(false); // Just clear the pending flag.
assert(!state->is_interp_only_mode(), "sanity check");
@ -409,7 +420,7 @@ JvmtiEventControllerPrivate::trace_changed(JvmtiThreadState *state, jlong now_en
if (changed & bit) {
// it changed, print it
log_trace(jvmti)("[%s] # %s event %s",
JvmtiTrace::safe_get_thread_name(state->get_thread_or_saved()),
JvmtiTrace::safe_get_thread_name(state->get_thread()),
(now_enabled & bit)? "Enabling" : "Disabling", JvmtiTrace::event_name((jvmtiEvent)ei));
}
}
@ -932,7 +943,7 @@ JvmtiEventControllerPrivate::set_user_enabled(JvmtiEnvBase *env, JavaThread *thr
void
JvmtiEventControllerPrivate::set_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
EC_TRACE(("[%s] # set frame pop - frame=%d",
JvmtiTrace::safe_get_thread_name(ets->get_thread_or_saved()),
JvmtiTrace::safe_get_thread_name(ets->get_thread()),
fpop.frame_number() ));
ets->get_frame_pops()->set(fpop);
@ -943,7 +954,7 @@ JvmtiEventControllerPrivate::set_frame_pop(JvmtiEnvThreadState *ets, JvmtiFrameP
void
JvmtiEventControllerPrivate::clear_frame_pop(JvmtiEnvThreadState *ets, JvmtiFramePop fpop) {
EC_TRACE(("[%s] # clear frame pop - frame=%d",
JvmtiTrace::safe_get_thread_name(ets->get_thread_or_saved()),
JvmtiTrace::safe_get_thread_name(ets->get_thread()),
fpop.frame_number() ));
ets->get_frame_pops()->clear(fpop);
@ -953,7 +964,7 @@ JvmtiEventControllerPrivate::clear_frame_pop(JvmtiEnvThreadState *ets, JvmtiFram
void
JvmtiEventControllerPrivate::clear_all_frame_pops(JvmtiEnvThreadState *ets) {
EC_TRACE(("[%s] # clear all frame pops",
JvmtiTrace::safe_get_thread_name(ets->get_thread_or_saved())
JvmtiTrace::safe_get_thread_name(ets->get_thread())
));
ets->get_frame_pops()->clear_all();
@ -965,7 +976,7 @@ JvmtiEventControllerPrivate::clear_to_frame_pop(JvmtiEnvThreadState *ets, JvmtiF
int cleared_cnt = ets->get_frame_pops()->clear_to(fpop);
EC_TRACE(("[%s] # clear to frame pop - frame=%d, count=%d",
JvmtiTrace::safe_get_thread_name(ets->get_thread_or_saved()),
JvmtiTrace::safe_get_thread_name(ets->get_thread()),
fpop.frame_number(),
cleared_cnt ));

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -51,13 +51,12 @@ static const int UNKNOWN_STACK_DEPTH = -99;
//
JvmtiThreadState *JvmtiThreadState::_head = nullptr;
bool JvmtiThreadState::_seen_interp_only_mode = false;
Atomic<bool> JvmtiThreadState::_seen_interp_only_mode{false};
JvmtiThreadState::JvmtiThreadState(JavaThread* thread, oop thread_oop)
: _thread_event_enable() {
assert(JvmtiThreadState_lock->is_locked(), "sanity check");
_thread = thread;
_thread_saved = nullptr;
_exception_state = ES_CLEARED;
_hide_single_stepping = false;
_pending_interp_only_mode = false;
@ -118,11 +117,11 @@ JvmtiThreadState::JvmtiThreadState(JavaThread* thread, oop thread_oop)
if (thread != nullptr) {
if (thread_oop == nullptr || thread->jvmti_vthread() == nullptr || thread->jvmti_vthread() == thread_oop) {
// The JavaThread for carrier or mounted virtual thread case.
// The JavaThread for an active carrier or a mounted virtual thread case.
// Set this only if thread_oop is current thread->jvmti_vthread().
thread->set_jvmti_thread_state(this);
assert(!thread->is_interp_only_mode(), "sanity check");
}
thread->set_interp_only_mode(false);
}
}
@ -135,7 +134,10 @@ JvmtiThreadState::~JvmtiThreadState() {
}
// clear this as the state for the thread
assert(get_thread() != nullptr, "sanity check");
assert(get_thread()->jvmti_thread_state() == this, "sanity check");
get_thread()->set_jvmti_thread_state(nullptr);
get_thread()->set_interp_only_mode(false);
// zap our env thread states
{
@ -321,18 +323,21 @@ void JvmtiThreadState::add_env(JvmtiEnvBase *env) {
void JvmtiThreadState::enter_interp_only_mode() {
assert(_thread != nullptr, "sanity check");
assert(JvmtiThreadState_lock->is_locked(), "sanity check");
assert(!is_interp_only_mode(), "entering interp only when in interp only mode");
_seen_interp_only_mode = true;
assert(_thread->jvmti_vthread() == nullptr || _thread->jvmti_vthread() == get_thread_oop(), "sanity check");
assert(_thread->jvmti_thread_state() == this, "sanity check");
_saved_interp_only_mode = true;
_thread->set_interp_only_mode(true);
invalidate_cur_stack_depth();
}
void JvmtiThreadState::leave_interp_only_mode() {
assert(JvmtiThreadState_lock->is_locked(), "sanity check");
assert(is_interp_only_mode(), "leaving interp only when not in interp only mode");
if (_thread == nullptr) {
// Unmounted virtual thread updates the saved value.
_saved_interp_only_mode = false;
} else {
_saved_interp_only_mode = false;
if (_thread != nullptr && _thread->jvmti_thread_state() == this) {
assert(_thread->jvmti_vthread() == nullptr || _thread->jvmti_vthread() == get_thread_oop(), "sanity check");
_thread->set_interp_only_mode(false);
}
}
@ -340,7 +345,7 @@ void JvmtiThreadState::leave_interp_only_mode() {
// Helper routine used in several places
int JvmtiThreadState::count_frames() {
JavaThread* thread = get_thread_or_saved();
JavaThread* thread = get_thread();
javaVFrame *jvf;
ResourceMark rm;
if (thread == nullptr) {
@ -577,11 +582,8 @@ void JvmtiThreadState::update_thread_oop_during_vm_start() {
}
}
// For virtual threads only.
void JvmtiThreadState::set_thread(JavaThread* thread) {
_thread_saved = nullptr; // Common case.
if (!_is_virtual && thread == nullptr) {
// Save JavaThread* if carrier thread is being detached.
_thread_saved = _thread;
}
assert(is_virtual(), "sanity check");
_thread = thread;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -123,8 +123,11 @@ class JvmtiVTSuspender : AllStatic {
class JvmtiThreadState : public CHeapObj<mtInternal> {
private:
friend class JvmtiEnv;
// The _thread field is a link to the JavaThread associated with JvmtiThreadState.
// A platform (including carrier) thread should always have a stable link to its JavaThread.
// The _thread field of a virtual thread should point to the JavaThread when the
// virtual thread is mounted. It should be set to null when it is unmounted.
JavaThread *_thread;
JavaThread *_thread_saved;
OopHandle _thread_oop_h;
// Jvmti Events that cannot be posted in their current context.
JvmtiDeferredEventQueue* _jvmti_event_queue;
@ -181,7 +184,7 @@ class JvmtiThreadState : public CHeapObj<mtInternal> {
inline JvmtiEnvThreadState* head_env_thread_state();
inline void set_head_env_thread_state(JvmtiEnvThreadState* ets);
static bool _seen_interp_only_mode; // interp_only_mode was requested at least once
static Atomic<bool> _seen_interp_only_mode; // interp_only_mode was requested at least once
public:
~JvmtiThreadState();
@ -204,19 +207,22 @@ class JvmtiThreadState : public CHeapObj<mtInternal> {
// Return true if any thread has entered interp_only_mode at any point during the JVM's execution.
static bool seen_interp_only_mode() {
return _seen_interp_only_mode;
return _seen_interp_only_mode.load_acquire();
}
void add_env(JvmtiEnvBase *env);
// The pending_interp_only_mode is set when the interp_only_mode is triggered.
// It is cleared by EnterInterpOnlyModeClosure handshake.
bool is_pending_interp_only_mode() { return _pending_interp_only_mode; }
void set_pending_interp_only_mode(bool val) { _pending_interp_only_mode = val; }
bool is_pending_interp_only_mode() { return _pending_interp_only_mode; }
void set_pending_interp_only_mode(bool val) {
_seen_interp_only_mode.release_store(true);
_pending_interp_only_mode = val;
}
// Used by the interpreter for fullspeed debugging support
bool is_interp_only_mode() {
return _thread == nullptr ? _saved_interp_only_mode : _thread->is_interp_only_mode();
return _saved_interp_only_mode;
}
void enter_interp_only_mode();
void leave_interp_only_mode();
@ -245,8 +251,10 @@ class JvmtiThreadState : public CHeapObj<mtInternal> {
int count_frames();
inline JavaThread *get_thread() { return _thread; }
inline JavaThread *get_thread_or_saved(); // return _thread_saved if _thread is null
inline JavaThread *get_thread() {
assert(is_virtual() || _thread != nullptr, "sanity check");
return _thread;
}
// Needed for virtual threads as they can migrate to different JavaThread's.
// Also used for carrier threads to clear/restore _thread.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -130,22 +130,21 @@ inline JvmtiThreadState* JvmtiThreadState::state_for(JavaThread *thread, Handle
return state;
}
inline JavaThread* JvmtiThreadState::get_thread_or_saved() {
// Use _thread_saved if cthread is detached from JavaThread (_thread == null).
return (_thread == nullptr && !is_virtual()) ? _thread_saved : _thread;
}
inline void JvmtiThreadState::set_should_post_on_exceptions(bool val) {
get_thread_or_saved()->set_should_post_on_exceptions_flag(val ? JNI_TRUE : JNI_FALSE);
get_thread()->set_should_post_on_exceptions_flag(val ? JNI_TRUE : JNI_FALSE);
}
inline void JvmtiThreadState::unbind_from(JvmtiThreadState* state, JavaThread* thread) {
if (state == nullptr) {
assert(!thread->is_interp_only_mode(), "sanity check");
return;
}
// Save thread's interp_only_mode.
state->_saved_interp_only_mode = thread->is_interp_only_mode();
state->set_thread(nullptr); // Make sure stale _thread value is never used.
assert(thread->jvmti_thread_state() == state, "sanity check");
assert(state->get_thread() == thread, "sanity check");
assert(thread->is_interp_only_mode() == state->_saved_interp_only_mode, "sanity check");
if (state->is_virtual()) { // clean _thread link for virtual threads only
state->set_thread(nullptr); // make sure stale _thread value is never used
}
}
inline void JvmtiThreadState::bind_to(JvmtiThreadState* state, JavaThread* thread) {
@ -158,7 +157,7 @@ inline void JvmtiThreadState::bind_to(JvmtiThreadState* state, JavaThread* threa
// Bind JavaThread to JvmtiThreadState.
thread->set_jvmti_thread_state(state);
if (state != nullptr) {
if (state != nullptr && state->is_virtual()) {
// Bind to JavaThread.
state->set_thread(thread);
}
@ -167,8 +166,13 @@ inline void JvmtiThreadState::bind_to(JvmtiThreadState* state, JavaThread* threa
inline void JvmtiThreadState::process_pending_interp_only(JavaThread* current) {
JvmtiThreadState* state = current->jvmti_thread_state();
if (state != nullptr && state->is_pending_interp_only_mode()) {
JvmtiEventController::enter_interp_only_mode(state);
if (state != nullptr && seen_interp_only_mode()) { // avoid MutexLocker if possible
MutexLocker mu(JvmtiThreadState_lock);
if (state->is_pending_interp_only_mode()) {
assert(state->get_thread() == current, "sanity check");
assert(!state->is_interp_only_mode(), "sanity check");
JvmtiEventController::enter_interp_only_mode(state);
}
}
}
#endif // SHARE_PRIMS_JVMTITHREADSTATE_INLINE_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -716,7 +716,7 @@ class PerfDataManager : AllStatic {
// Utility Classes
/*
* this class will administer a PerfCounter used as a time accumulator
* This class will administer a PerfCounter used as a time accumulator
* for a basic block much like the TraceTime class.
*
* Example:
@ -731,6 +731,9 @@ class PerfDataManager : AllStatic {
* Note: use of this class does not need to occur within a guarded
* block. The UsePerfData guard is used with the implementation
* of this class.
*
* But also note this class does not guard against shutdown races -
* see SafePerfTraceTime below.
*/
class PerfTraceTime : public StackObj {
@ -756,6 +759,26 @@ class PerfTraceTime : public StackObj {
}
};
/* A variant of PerfTraceTime that guards against use during shutdown -
* see PerfDataManager::destroy.
*/
class SafePerfTraceTime : public StackObj {
protected:
elapsedTimer _t;
PerfLongCounter* _timerp;
public:
inline SafePerfTraceTime(PerfLongCounter* timerp);
const char* name() const {
assert(_timerp != nullptr, "sanity");
return _timerp->name();
}
inline ~SafePerfTraceTime();
};
/* The PerfTraceTimedEvent class is responsible for counting the
* occurrence of some event and measuring the elapsed time of
* the event in two separate PerfCounter instances.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,6 +27,7 @@
#include "runtime/perfData.hpp"
#include "utilities/globalCounter.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
@ -50,4 +51,23 @@ inline bool PerfDataManager::exists(const char* name) {
}
}
inline SafePerfTraceTime::SafePerfTraceTime(PerfLongCounter* timerp) : _timerp(timerp) {
GlobalCounter::CriticalSection cs(Thread::current());
if (!UsePerfData || !PerfDataManager::has_PerfData() || timerp == nullptr) {
return;
}
_t.start();
}
inline SafePerfTraceTime::~SafePerfTraceTime() {
GlobalCounter::CriticalSection cs(Thread::current());
if (!UsePerfData || !PerfDataManager::has_PerfData() || !_t.is_active()) {
return;
}
_t.stop();
_timerp->inc(_t.ticks());
}
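A usage sketch; the counter accessor is taken from this change's ThreadWXEnable hunks, the scope itself is hypothetical:

// Hypothetical sketch: time a scope that can race with VM shutdown.
{
  SafePerfTraceTime ptt(ClassLoader::perf_change_wx_time());
  // ... timed work; once PerfData has been destroyed, the destructor
  // observes has_PerfData() == false and skips the counter update ...
}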
#endif // SHARE_RUNTIME_PERFDATA_INLINE_HPP

View File

@ -42,7 +42,7 @@ class ThreadWXEnable {
public:
ThreadWXEnable(WXMode* new_mode, Thread* thread) :
_thread(thread), _this_wx_mode(new_mode) {
NOT_PRODUCT(PerfTraceTime ptt(ClassLoader::perf_change_wx_time());)
NOT_PRODUCT(SafePerfTraceTime ptt(ClassLoader::perf_change_wx_time());)
JavaThread* javaThread
= _thread && _thread->is_Java_thread()
? JavaThread::cast(_thread) : nullptr;
@ -55,7 +55,7 @@ public:
}
ThreadWXEnable(WXMode new_mode, Thread* thread) :
_thread(thread), _this_wx_mode(nullptr) {
NOT_PRODUCT(PerfTraceTime ptt(ClassLoader::perf_change_wx_time());)
NOT_PRODUCT(SafePerfTraceTime ptt(ClassLoader::perf_change_wx_time());)
JavaThread* javaThread
= _thread && _thread->is_Java_thread()
? JavaThread::cast(_thread) : nullptr;
@ -68,7 +68,7 @@ public:
}
~ThreadWXEnable() {
NOT_PRODUCT(PerfTraceTime ptt(ClassLoader::perf_change_wx_time());)
NOT_PRODUCT(SafePerfTraceTime ptt(ClassLoader::perf_change_wx_time());)
if (_thread) {
_thread->enable_wx(_old_mode);
JavaThread* javaThread
@ -86,4 +86,3 @@ public:
#endif // MACOS_AARCH64
#endif // SHARE_RUNTIME_THREADWXSETTERS_INLINE_HPP

View File

@ -168,6 +168,29 @@ class oopDesc;
#define SIZE_FORMAT_X_0 "0x%08" PRIxPTR
#endif // _LP64
template<size_t N>
constexpr auto sizeof_auto_impl() {
if constexpr (N <= std::numeric_limits<uint8_t>::max()) return uint8_t(N);
else if constexpr (N <= std::numeric_limits<uint16_t>::max()) return uint16_t(N);
else if constexpr (N <= std::numeric_limits<uint32_t>::max()) return uint32_t(N);
else return uint64_t(N);
}
// Yields the size (in bytes) of the operand, using the smallest
// unsigned type that can represent the size value. The operand may be
// an expression, which is an unevaluated operand, or it may be a
// type. All of the restrictions for sizeof operands apply to the
// operand. The result is a constant expression.
//
// Example of correct usage of sizeof/sizeof_auto:
// // with sizeof_auto the multiplication would wrap in 32-bit arithmetic;
// // sizeof keeps the computation in size_t
// size_t size = std::numeric_limits<uint32_t>::max() * sizeof(uint16_t);
// // with sizeof the division result would narrow (warning/error under
// // stricter compiler flags); sizeof_auto keeps the result in int
// int count = 42 / sizeof_auto(uint16_t);
#define sizeof_auto(...) sizeof_auto_impl<sizeof(__VA_ARGS__)>()
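A small illustration of the deduced types (assumed-correct sketch, not part of the patch):

// The result type tracks the operand's size: one byte needs only uint8_t,
// and the value itself is always the byte count.
static_assert(sizeof(sizeof_auto(char)) == sizeof(uint8_t), "one byte fits in uint8_t");
static_assert(sizeof_auto(uint64_t) == 8, "value is the byte count");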
// Convert pointer to intptr_t, for use in printing pointers.
inline intptr_t p2i(const volatile void* p) {
return (intptr_t) p;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -11233,7 +11233,7 @@ class Character implements java.io.Serializable, Comparable<Character>, Constabl
* @param codePoint the character (Unicode code point) to be tested.
* @return {@code true} if the character is an Emoji;
* {@code false} otherwise.
* @spec https://unicode.org/reports/tr51/ Unicode Emoji
* @spec https://www.unicode.org/reports/tr51/ Unicode Emoji
* @since 21
*/
public static boolean isEmoji(int codePoint) {
@ -11252,7 +11252,7 @@ class Character implements java.io.Serializable, Comparable<Character>, Constabl
* @param codePoint the character (Unicode code point) to be tested.
* @return {@code true} if the character has the Emoji Presentation
* property; {@code false} otherwise.
* @spec https://unicode.org/reports/tr51/ Unicode Emoji
* @spec https://www.unicode.org/reports/tr51/ Unicode Emoji
* @since 21
*/
public static boolean isEmojiPresentation(int codePoint) {
@ -11271,7 +11271,7 @@ class Character implements java.io.Serializable, Comparable<Character>, Constabl
* @param codePoint the character (Unicode code point) to be tested.
* @return {@code true} if the character is an Emoji Modifier;
* {@code false} otherwise.
* @spec https://unicode.org/reports/tr51/ Unicode Emoji
* @spec https://www.unicode.org/reports/tr51/ Unicode Emoji
* @since 21
*/
public static boolean isEmojiModifier(int codePoint) {
@ -11290,7 +11290,7 @@ class Character implements java.io.Serializable, Comparable<Character>, Constabl
* @param codePoint the character (Unicode code point) to be tested.
* @return {@code true} if the character is an Emoji Modifier Base;
* {@code false} otherwise.
* @spec https://unicode.org/reports/tr51/ Unicode Emoji
* @spec https://www.unicode.org/reports/tr51/ Unicode Emoji
* @since 21
*/
public static boolean isEmojiModifierBase(int codePoint) {
@ -11309,7 +11309,7 @@ class Character implements java.io.Serializable, Comparable<Character>, Constabl
* @param codePoint the character (Unicode code point) to be tested.
* @return {@code true} if the character is an Emoji Component;
* {@code false} otherwise.
* @spec https://unicode.org/reports/tr51/ Unicode Emoji
* @spec https://www.unicode.org/reports/tr51/ Unicode Emoji
* @since 21
*/
public static boolean isEmojiComponent(int codePoint) {
@ -11328,7 +11328,7 @@ class Character implements java.io.Serializable, Comparable<Character>, Constabl
* @param codePoint the character (Unicode code point) to be tested.
* @return {@code true} if the character is an Extended Pictographic;
* {@code false} otherwise.
* @spec https://unicode.org/reports/tr51/ Unicode Emoji
* @spec https://www.unicode.org/reports/tr51/ Unicode Emoji
* @since 21
*/
public static boolean isExtendedPictographic(int codePoint) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,11 +55,10 @@ class Shutdown {
private static int currentRunningHook = -1;
/* The preceding static fields are protected by this lock */
private static class Lock { };
private static Object lock = new Lock();
private static final Object lock = new Object();
/* Lock object for the native halt method */
private static Object haltLock = new Lock();
private static final Object haltLock = new Object();
/**
* Add a new system shutdown hook. Checks the shutdown state and

View File

@ -1881,8 +1881,8 @@ public class Thread implements Runnable {
* been {@link #start() started}.
*
* @implNote
* For platform threads, the implementation uses a loop of {@code this.wait}
* calls conditioned on {@code this.isAlive}. As a thread terminates the
* This implementation uses a loop of {@code this.wait} calls
* conditioned on {@code this.isAlive}. As a thread terminates the
* {@code this.notifyAll} method is invoked. It is recommended that
* applications not use {@code wait}, {@code notify}, or
* {@code notifyAll} on {@code Thread} instances.
@ -1901,13 +1901,12 @@ public class Thread implements Runnable {
public final void join(long millis) throws InterruptedException {
if (millis < 0)
throw new IllegalArgumentException("timeout value is negative");
if (this instanceof VirtualThread vthread) {
if (isAlive()) {
long nanos = MILLISECONDS.toNanos(millis);
vthread.joinNanos(nanos);
}
if (!isAlive())
return;
// ensure there is a notifyAll to wake up waiters when this thread terminates
if (this instanceof VirtualThread vthread) {
vthread.beforeJoin();
}
synchronized (this) {
@ -1936,8 +1935,8 @@ public class Thread implements Runnable {
* been {@link #start() started}.
*
* @implNote
* For platform threads, the implementation uses a loop of {@code this.wait}
* calls conditioned on {@code this.isAlive}. As a thread terminates the
* This implementation uses a loop of {@code this.wait} calls
* conditioned on {@code this.isAlive}. As a thread terminates the
* {@code this.notifyAll} method is invoked. It is recommended that
* applications not use {@code wait}, {@code notify}, or
* {@code notifyAll} on {@code Thread} instances.
@ -1966,16 +1965,6 @@ public class Thread implements Runnable {
throw new IllegalArgumentException("nanosecond timeout value out of range");
}
if (this instanceof VirtualThread vthread) {
if (isAlive()) {
// convert arguments to a total in nanoseconds
long totalNanos = MILLISECONDS.toNanos(millis);
totalNanos += Math.min(Long.MAX_VALUE - totalNanos, nanos);
vthread.joinNanos(totalNanos);
}
return;
}
if (nanos > 0 && millis < Long.MAX_VALUE) {
millis++;
}
@ -2035,10 +2024,6 @@ public class Thread implements Runnable {
if (nanos <= 0)
return false;
if (this instanceof VirtualThread vthread) {
return vthread.joinNanos(nanos);
}
// convert to milliseconds
long millis = MILLISECONDS.convert(nanos, NANOSECONDS);
if (nanos > NANOSECONDS.convert(millis, MILLISECONDS)) {

View File

@ -26,7 +26,6 @@ package java.lang;
import java.util.Locale;
import java.util.Objects;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;
import java.util.concurrent.ForkJoinPool;
@ -68,7 +67,6 @@ final class VirtualThread extends BaseVirtualThread {
private static final long STATE = U.objectFieldOffset(VirtualThread.class, "state");
private static final long PARK_PERMIT = U.objectFieldOffset(VirtualThread.class, "parkPermit");
private static final long CARRIER_THREAD = U.objectFieldOffset(VirtualThread.class, "carrierThread");
private static final long TERMINATION = U.objectFieldOffset(VirtualThread.class, "termination");
private static final long ON_WAITING_LIST = U.objectFieldOffset(VirtualThread.class, "onWaitingList");
// scheduler and continuation
@ -184,8 +182,8 @@ final class VirtualThread extends BaseVirtualThread {
// carrier thread when mounted, accessed by VM
private volatile Thread carrierThread;
// termination object when joining, created lazily if needed
private volatile CountDownLatch termination;
// true to notifyAll after this virtual thread terminates
private volatile boolean notifyAllAfterTerminate;
/**
* Returns the default scheduler.
@ -677,11 +675,11 @@ final class VirtualThread extends BaseVirtualThread {
assert carrierThread == null;
setState(TERMINATED);
// notify anyone waiting for this virtual thread to terminate
CountDownLatch termination = this.termination;
if (termination != null) {
assert termination.getCount() == 1;
termination.countDown();
// notifyAll to wake up any threads waiting for this thread to terminate
if (notifyAllAfterTerminate) {
synchronized (this) {
notifyAll();
}
}
// notify container
@ -740,6 +738,13 @@ final class VirtualThread extends BaseVirtualThread {
// do nothing
}
/**
* Invoked by Thread.join before a thread waits for this virtual thread to terminate.
*/
void beforeJoin() {
notifyAllAfterTerminate = true;
}
/**
* Parks until unparked or interrupted. If already unparked then the parking
* permit is consumed and this method completes immediately (meaning it doesn't
@ -999,36 +1004,6 @@ final class VirtualThread extends BaseVirtualThread {
}
}
/**
* Waits up to {@code nanos} nanoseconds for this virtual thread to terminate.
* A timeout of {@code 0} means to wait forever.
*
* @throws InterruptedException if interrupted while waiting
* @return true if the thread has terminated
*/
boolean joinNanos(long nanos) throws InterruptedException {
if (state() == TERMINATED)
return true;
// ensure termination object exists, then re-check state
CountDownLatch termination = getTermination();
if (state() == TERMINATED)
return true;
// wait for virtual thread to terminate
if (nanos == 0) {
termination.await();
} else {
boolean terminated = termination.await(nanos, NANOSECONDS);
if (!terminated) {
// waiting time elapsed
return false;
}
}
assert state() == TERMINATED;
return true;
}
@Override
void blockedOn(Interruptible b) {
disableSuspendAndPreempt();
@ -1239,20 +1214,6 @@ final class VirtualThread extends BaseVirtualThread {
return obj == this;
}
/**
* Returns the termination object, creating it if needed.
*/
private CountDownLatch getTermination() {
CountDownLatch termination = this.termination;
if (termination == null) {
termination = new CountDownLatch(1);
if (!U.compareAndSetReference(this, TERMINATION, null, termination)) {
termination = this.termination;
}
}
return termination;
}
/**
* Returns the lock object to synchronize on when accessing carrierThread.
* The lock prevents carrierThread from being reset to null during unmount.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -746,15 +746,7 @@ abstract class ClassSpecializer<T,K,S extends ClassSpecializer<T,K,S>.SpeciesDat
new Consumer<>() {
@Override
public void accept(CodeBuilder cob) {
cob.aload(0); // this
final List<Var> ctorArgs = AFTER_THIS.fromTypes(superCtorType.parameterList());
for (Var ca : ctorArgs) {
ca.emitLoadInstruction(cob);
}
// super(ca...)
cob.invokespecial(superClassDesc, INIT_NAME, methodDesc(superCtorType));
// store down fields
Var lastFV = AFTER_THIS.lastOf(ctorArgs);
@ -766,6 +758,12 @@ abstract class ClassSpecializer<T,K,S extends ClassSpecializer<T,K,S>.SpeciesDat
cob.putfield(classDesc, f.name, f.desc);
}
// super(ca...)
cob.aload(0); // this
for (Var ca : ctorArgs) {
ca.emitLoadInstruction(cob);
}
cob.invokespecial(superClassDesc, INIT_NAME, methodDesc(superCtorType));
cob.return_();
}
});

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -391,15 +391,15 @@ import sun.invoke.util.Wrapper;
new Consumer<>() {
@Override
public void accept(CodeBuilder cob) {
cob.aload(0)
.invokespecial(CD_Object, INIT_NAME, MTD_void);
int parameterCount = factoryType.parameterCount();
for (int i = 0; i < parameterCount; i++) {
cob.aload(0)
.loadLocal(TypeKind.from(factoryType.parameterType(i)), cob.parameterSlot(i))
.putfield(pool.fieldRefEntry(lambdaClassEntry, pool.nameAndTypeEntry(argName(i), argDescs[i])));
}
cob.return_();
cob.aload(0)
.invokespecial(CD_Object, INIT_NAME, MTD_void)
.return_();
}
});
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -362,10 +362,8 @@ public final class MethodHandleProxies {
// <init>(Lookup, MethodHandle target, MethodHandle callerBoundTarget)
clb.withMethodBody(INIT_NAME, MTD_void_Lookup_MethodHandle_MethodHandle, 0, cob -> {
cob.aload(0)
.invokespecial(CD_Object, INIT_NAME, MTD_void)
// call ensureOriginalLookup to verify the given Lookup has access
.aload(1)
// call ensureOriginalLookup to verify the given Lookup has access
cob.aload(1)
.invokestatic(proxyDesc, ENSURE_ORIGINAL_LOOKUP, MTD_void_Lookup)
// this.target = target;
.aload(0)
@ -383,7 +381,9 @@ public final class MethodHandleProxies {
}
// complete
cob.return_();
cob.aload(0)
.invokespecial(CD_Object, INIT_NAME, MTD_void)
.return_();
});
// private static void ensureOriginalLookup(Lookup) checks if the given Lookup

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,8 +60,7 @@ public class ReferenceQueue<@jdk.internal.RequiresIdentity T> {
private volatile Reference<? extends T> head;
private long queueLength = 0;
private static class Lock { };
private final Lock lock = new Lock();
private final Object lock = new Object();
/**
* Constructs a new reference-object queue.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -363,9 +363,9 @@ public final class ObjectMethods {
* @return the method handle
*/
private static MethodHandle makeToString(MethodHandles.Lookup lookup,
Class<?> receiverClass,
MethodHandle[] getters,
List<String> names) {
Class<?> receiverClass,
MethodHandle[] getters,
List<String> names) {
assert getters.length == names.size();
if (getters.length == 0) {
// special case
@ -516,8 +516,8 @@ public final class ObjectMethods {
requireNonNull(type);
requireNonNull(recordClass);
requireNonNull(names);
requireNonNull(getters);
Arrays.stream(getters).forEach(Objects::requireNonNull);
List<MethodHandle> getterList = List.of(getters); // deep null check
MethodType methodType;
if (type instanceof MethodType mt)
methodType = mt;
@ -526,7 +526,14 @@ public final class ObjectMethods {
if (!MethodHandle.class.equals(type))
throw new IllegalArgumentException(type.toString());
}
List<MethodHandle> getterList = List.of(getters);
for (MethodHandle getter : getterList) {
var getterType = getter.type();
if (getterType.parameterCount() != 1 || getterType.returnType() == void.class || getterType.parameterType(0) != recordClass) {
throw new IllegalArgumentException("Illegal getter type %s for recordClass %s".formatted(getterType, recordClass.getTypeName()));
}
}
MethodHandle handle = switch (methodName) {
case "equals" -> {
if (methodType != null && !methodType.equals(MethodType.methodType(boolean.class, recordClass, Object.class)))
@ -541,7 +548,7 @@ public final class ObjectMethods {
case "toString" -> {
if (methodType != null && !methodType.equals(MethodType.methodType(String.class, recordClass)))
throw new IllegalArgumentException("Bad method type: " + methodType);
List<String> nameList = "".equals(names) ? List.of() : List.of(names.split(";"));
List<String> nameList = names.isEmpty() ? List.of() : List.of(names.split(";"));
if (nameList.size() != getterList.size())
throw new IllegalArgumentException("Name list and accessor list do not match");
yield makeToString(lookup, recordClass, getters, nameList);

View File

@ -692,7 +692,7 @@ public class ZipFile implements ZipConstants, Closeable {
final Set<InputStream> istreams;
// List of cached Inflater objects for decompression
Deque<Inflater> inflaterCache;
List<Inflater> inflaterCache;
final Cleanable cleanable;
@ -702,7 +702,7 @@ public class ZipFile implements ZipConstants, Closeable {
assert zipCoder != null : "null ZipCoder";
this.cleanable = CleanerFactory.cleaner().register(zf, this);
this.istreams = Collections.newSetFromMap(new WeakHashMap<>());
this.inflaterCache = new ArrayDeque<>();
this.inflaterCache = new ArrayList<>();
this.zsrc = Source.get(file, (mode & OPEN_DELETE) != 0, zipCoder);
}
@ -715,10 +715,10 @@ public class ZipFile implements ZipConstants, Closeable {
* a new one.
*/
Inflater getInflater() {
Inflater inf;
synchronized (inflaterCache) {
if ((inf = inflaterCache.poll()) != null) {
return inf;
if (!inflaterCache.isEmpty()) {
// return the most recently used Inflater from the cache of not-in-use Inflaters
return inflaterCache.removeLast();
}
}
return new Inflater(true);
@ -728,7 +728,7 @@ public class ZipFile implements ZipConstants, Closeable {
* Releases the specified inflater to the list of available inflaters.
*/
void releaseInflater(Inflater inf) {
Deque<Inflater> inflaters = this.inflaterCache;
List<Inflater> inflaters = this.inflaterCache;
if (inflaters != null) {
synchronized (inflaters) {
// double checked!
@ -747,13 +747,12 @@ public class ZipFile implements ZipConstants, Closeable {
IOException ioe = null;
// Release cached inflaters and close the cache first
Deque<Inflater> inflaters = this.inflaterCache;
List<Inflater> inflaters = this.inflaterCache;
if (inflaters != null) {
synchronized (inflaters) {
// no need to double-check as only one thread gets a
// chance to execute run() (Cleaner guarantee)...
Inflater inf;
while ((inf = inflaters.poll()) != null) {
for (Inflater inf : inflaters) {
inf.end();
}
// close inflaters cache
@ -762,23 +761,22 @@ public class ZipFile implements ZipConstants, Closeable {
}
// Close streams, release their inflaters
if (istreams != null) {
synchronized (istreams) {
if (!istreams.isEmpty()) {
InputStream[] copy = istreams.toArray(new InputStream[0]);
istreams.clear();
for (InputStream is : copy) {
try {
is.close();
} catch (IOException e) {
if (ioe == null) ioe = e;
else ioe.addSuppressed(e);
}
synchronized (istreams) {
if (!istreams.isEmpty()) {
InputStream[] copy = istreams.toArray(new InputStream[0]);
istreams.clear();
for (InputStream is : copy) {
try {
is.close();
} catch (IOException e) {
if (ioe == null) ioe = e;
else ioe.addSuppressed(e);
}
}
}
}
// Release ZIP src
if (zsrc != null) {
synchronized (zsrc) {
@ -1721,8 +1719,10 @@ public class ZipFile implements ZipConstants, Closeable {
this.cen = null;
return; // only END header present
}
if (end.cenlen > end.endpos)
// Validate END header
if (end.cenlen > end.endpos) {
zerror("invalid END header (bad central directory size)");
}
long cenpos = end.endpos - end.cenlen; // position of CEN table
// Get position of first local file (LOC) header, taking into
// account that there may be a stub prefixed to the ZIP file.
@ -1730,18 +1730,22 @@ public class ZipFile implements ZipConstants, Closeable {
if (locpos < 0) {
zerror("invalid END header (bad central directory offset)");
}
// read in the CEN
if (end.cenlen > MAX_CEN_SIZE) {
zerror("invalid END header (central directory size too large)");
}
if (end.centot < 0 || end.centot > end.cenlen / CENHDR) {
zerror("invalid END header (total entries count too large)");
}
cen = this.cen = new byte[(int)end.cenlen];
if (readFullyAt(cen, 0, cen.length, cenpos) != end.cenlen) {
// Validation ensures these are <= Integer.MAX_VALUE
int cenlen = Math.toIntExact(end.cenlen);
int centot = Math.toIntExact(end.centot);
// read in the CEN
cen = this.cen = new byte[cenlen];
if (readFullyAt(cen, 0, cen.length, cenpos) != cenlen) {
zerror("read CEN tables failed");
}
this.total = Math.toIntExact(end.centot);
this.total = centot;
} else {
cen = this.cen;
this.total = knownTotal;

View File

@ -336,7 +336,7 @@ public class VectorSupport {
@IntrinsicCandidate
public static
<V extends Vector<E>, E>
V libraryUnaryOp(long addr, Class<? extends V> vClass, Class<E> eClass, int length, String debugName,
V libraryUnaryOp(long addr, Class<? extends V> vClass, int laneType, int length, String debugName,
V v,
UnaryOperation<V,?> defaultImpl) {
assert isNonCapturingLambda(defaultImpl) : defaultImpl;
@ -374,7 +374,7 @@ public class VectorSupport {
@IntrinsicCandidate
public static
<V extends VectorPayload, E>
V libraryBinaryOp(long addr, Class<? extends V> vClass, Class<E> eClass, int length, String debugName,
V libraryBinaryOp(long addr, Class<? extends V> vClass, int laneType, int length, String debugName,
V v1, V v2,
BinaryOperation<V,?> defaultImpl) {
assert isNonCapturingLambda(defaultImpl) : defaultImpl;

View File

@ -1,9 +1,9 @@
## zlib v1.3.1
## zlib v1.3.2
### zlib License
<pre>
Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler
Copyright (C) 1995-2026 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages

View File

@ -1,6 +1,57 @@
ChangeLog file for zlib
Changes in 1.3.2 (17 Feb 2026)
- Continued rewrite of CMake build [Vollstrecker]
- Various portability improvements
- Various github workflow additions and improvements
- Check for negative lengths in crc32_combine functions
- Copy only the initialized window contents in inflateCopy
- Prevent the use of insecure functions without an explicit request
- Add compressBound_z and deflateBound_z functions for large values
- Use atomics to build inflate fixed tables once
- Add definition of ZLIB_INSECURE to build tests with c89 and c94
- Add --undefined option to ./configure for UBSan checker
- Copy only the initialized deflate state in deflateCopy
- Zero inflate state on allocation
- Remove untgz from contrib
- Add _z versions of the compress and uncompress functions
- Vectorize the CRC-32 calculation on the s390x
- Set bit 11 of the zip header flags in minizip if UTF-8
- Update OS/400 support
- Add a test to configure to check for a working compiler
- Check for invalid NULL pointer inputs to zlib operations
- Add --mandir to ./configure to specify manual directory
- Add LICENSE.Info-Zip to contrib/minizip
- Remove vstudio projects in favor of cmake-generated projects
- Replace strcpy() with memcpy() in contrib/minizip
Changes in 1.3.1.2 (8 Dec 2025)
- Improve portability to RISC OS
- Permit compiling contrib/minizip/unzip.c with decryption
- Enable build of shared library on AIX
- Make deflateBound() more conservative and handle Z_STREAM_END
- Add zipAlreadyThere() to minizip zip.c to help avoid duplicates
- Make z_off_t 64 bits by default
- Add deflateUsed() function to get the used bits in the last byte
- Avoid out-of-bounds pointer arithmetic in inflateCopy()
- Add Haiku to configure for proper LDSHARED settings
- Add Bazel targets
- Complete rewrite of CMake build [Vollstrecker]
- Clarify the use of errnum in gzerror()
- Note that gzseek() requests are deferred until the next operation
- Note the use of gzungetc() to run a deferred seek while reading
- Fix bug in inflatePrime() for 16-bit ints
- Add a "G" option to force gzip, disabling transparency in gzread()
- Improve the discrimination between trailing garbage and bad gzip
- Allow gzflush() to write empty gzip members
- Remove redundant frees of point list on error in examples/zran.c
- Clarify the use of inflateGetHeader()
- Update links to the RFCs
- Return all available uncompressed data on error in gzread.c
- Support non-blocking devices in the gz* routines
- Various other small improvements
Changes in 1.3.1 (22 Jan 2024)
- Reject overflows of zip header fields in minizip
- Fix bug in inflateSync() for data held in bit buffer

View File

@ -1,10 +1,10 @@
ZLIB DATA COMPRESSION LIBRARY
zlib 1.3.1 is a general purpose data compression library. All the code is
thread safe. The data format used by the zlib library is described by RFCs
(Request for Comments) 1950 to 1952 in the files
http://tools.ietf.org/html/rfc1950 (zlib format), rfc1951 (deflate format) and
rfc1952 (gzip format).
zlib 1.3.2 is a general purpose data compression library. All the code is
thread safe (though see the FAQ for caveats). The data format used by the zlib
library is described by RFCs (Request for Comments) 1950 to 1952 at
https://datatracker.ietf.org/doc/html/rfc1950 (zlib format), rfc1951 (deflate
format) and rfc1952 (gzip format).
All functions of the compression library are documented in the file zlib.h
(volunteer to write man pages welcome, contact zlib@gzip.org). A usage example
@ -21,17 +21,17 @@ make_vms.com.
Questions about zlib should be sent to <zlib@gzip.org>, or to Gilles Vollant
<info@winimage.com> for the Windows DLL version. The zlib home page is
http://zlib.net/ . Before reporting a problem, please check this site to
https://zlib.net/ . Before reporting a problem, please check this site to
verify that you have the latest version of zlib; otherwise get the latest
version and check whether the problem still exists or not.
PLEASE read the zlib FAQ http://zlib.net/zlib_faq.html before asking for help.
PLEASE read the zlib FAQ https://zlib.net/zlib_faq.html before asking for help.
Mark Nelson <markn@ieee.org> wrote an article about zlib for the Jan. 1997
issue of Dr. Dobb's Journal; a copy of the article is available at
https://marknelson.us/posts/1997/01/01/zlib-engine.html .
https://zlib.net/nelson/ .
The changes made in version 1.3.1 are documented in the file ChangeLog.
The changes made in version 1.3.2 are documented in the file ChangeLog.
Unsupported third party contributions are provided in directory contrib/ .
@ -43,9 +43,9 @@ can be found at https://github.com/pmqs/IO-Compress .
A Python interface to zlib written by A.M. Kuchling <amk@amk.ca> is
available in Python 1.5 and later versions, see
http://docs.python.org/library/zlib.html .
https://docs.python.org/3/library/zlib.html .
zlib is built into tcl: http://wiki.tcl.tk/4610 .
zlib is built into tcl: https://wiki.tcl-lang.org/page/zlib .
An experimental package to read and write files in .zip format, written on top
of zlib by Gilles Vollant <info@winimage.com>, is available in the
@ -69,9 +69,7 @@ Notes for some targets:
- zlib doesn't work on HP-UX 9.05 with some versions of /bin/cc. It works with
other compilers. Use "make test" to check your compiler.
- gzdopen is not supported on RISCOS or BEOS.
- For PalmOs, see http://palmzlib.sourceforge.net/
- For PalmOs, see https://palmzlib.sourceforge.net/
Acknowledgments:
@ -83,7 +81,7 @@ Acknowledgments:
Copyright notice:
(C) 1995-2024 Jean-loup Gailly and Mark Adler
(C) 1995-2026 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
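
The one-shot API that the README's pointer to zlib.h covers is small enough to show whole; a hedged round-trip sketch using the long-standing compress() and uncompress() entry points (standard API, not part of this diff):

#include <stdio.h>
#include <string.h>
#include <assert.h>
#include "zlib.h"

int main(void) {
    const Bytef src[] = "hello, hello, hello, hello";
    uLong slen = (uLong)sizeof(src);
    Bytef comp[128], back[128];
    uLongf clen = sizeof(comp), blen = sizeof(back);

    assert(compressBound(slen) <= sizeof(comp)); /* worst-case output fits */
    if (compress(comp, &clen, src, slen) != Z_OK)
        return 1;
    if (uncompress(back, &blen, comp, clen) != Z_OK)
        return 1;
    assert(blen == slen && memcmp(back, src, slen) == 0);
    printf("%lu -> %lu -> %lu bytes\n", slen, clen, blen);
    return 0;
}
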

View File

@ -23,7 +23,7 @@
*/
/* compress.c -- compress a memory buffer
* Copyright (C) 1995-2005, 2014, 2016 Jean-loup Gailly, Mark Adler
* Copyright (C) 1995-2026 Jean-loup Gailly, Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -42,13 +42,19 @@
compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_BUF_ERROR if there was not enough room in the output buffer,
Z_STREAM_ERROR if the level parameter is invalid.
The _z versions of the functions take size_t length arguments.
*/
int ZEXPORT compress2(Bytef *dest, uLongf *destLen, const Bytef *source,
uLong sourceLen, int level) {
int ZEXPORT compress2_z(Bytef *dest, z_size_t *destLen, const Bytef *source,
z_size_t sourceLen, int level) {
z_stream stream;
int err;
const uInt max = (uInt)-1;
uLong left;
z_size_t left;
if ((sourceLen > 0 && source == NULL) ||
destLen == NULL || (*destLen > 0 && dest == NULL))
return Z_STREAM_ERROR;
left = *destLen;
*destLen = 0;
@ -67,23 +73,36 @@ int ZEXPORT compress2(Bytef *dest, uLongf *destLen, const Bytef *source,
do {
if (stream.avail_out == 0) {
stream.avail_out = left > (uLong)max ? max : (uInt)left;
stream.avail_out = left > (z_size_t)max ? max : (uInt)left;
left -= stream.avail_out;
}
if (stream.avail_in == 0) {
stream.avail_in = sourceLen > (uLong)max ? max : (uInt)sourceLen;
stream.avail_in = sourceLen > (z_size_t)max ? max :
(uInt)sourceLen;
sourceLen -= stream.avail_in;
}
err = deflate(&stream, sourceLen ? Z_NO_FLUSH : Z_FINISH);
} while (err == Z_OK);
*destLen = stream.total_out;
*destLen = (z_size_t)(stream.next_out - dest);
deflateEnd(&stream);
return err == Z_STREAM_END ? Z_OK : err;
}
int ZEXPORT compress2(Bytef *dest, uLongf *destLen, const Bytef *source,
uLong sourceLen, int level) {
int ret;
z_size_t got = *destLen;
ret = compress2_z(dest, &got, source, sourceLen, level);
*destLen = (uLong)got;
return ret;
}
/* ===========================================================================
*/
int ZEXPORT compress_z(Bytef *dest, z_size_t *destLen, const Bytef *source,
z_size_t sourceLen) {
return compress2_z(dest, destLen, source, sourceLen,
Z_DEFAULT_COMPRESSION);
}
int ZEXPORT compress(Bytef *dest, uLongf *destLen, const Bytef *source,
uLong sourceLen) {
return compress2(dest, destLen, source, sourceLen, Z_DEFAULT_COMPRESSION);
@ -93,7 +112,12 @@ int ZEXPORT compress(Bytef *dest, uLongf *destLen, const Bytef *source,
If the default memLevel or windowBits for deflateInit() is changed, then
this function needs to be updated.
*/
uLong ZEXPORT compressBound(uLong sourceLen) {
return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
(sourceLen >> 25) + 13;
z_size_t ZEXPORT compressBound_z(z_size_t sourceLen) {
z_size_t bound = sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
(sourceLen >> 25) + 13;
return bound < sourceLen ? (z_size_t)-1 : bound;
}
uLong ZEXPORT compressBound(uLong sourceLen) {
z_size_t bound = compressBound_z(sourceLen);
return (uLong)bound != bound ? (uLong)-1 : (uLong)bound;
}
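
A hedged usage sketch of the _z entry points defined above (compress2_z and compressBound_z are taken from this patch): size the output with the saturating bound first, then compress.

#include <stdio.h>
#include "zlib.h"

/* Compress src into dst using the size_t-based entry points added above.
   Returns a zlib status code. */
int compress_sketch(const Bytef *src, z_size_t slen, Bytef *dst, z_size_t dcap) {
    z_size_t need, dlen;
    int ret;

    need = compressBound_z(slen);    /* saturates to (z_size_t)-1 on overflow */
    if (need == (z_size_t)-1 || need > dcap)
        return Z_BUF_ERROR;
    dlen = dcap;
    ret = compress2_z(dst, &dlen, src, slen, Z_DEFAULT_COMPRESSION);
    if (ret == Z_OK)
        printf("compressed %lu -> %lu bytes\n",
               (unsigned long)slen, (unsigned long)dlen);
    return ret;
}
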

View File

@ -23,7 +23,7 @@
*/
/* deflate.c -- compress data using the deflation algorithm
* Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler
* Copyright (C) 1995-2026 Jean-loup Gailly and Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -61,7 +61,7 @@
* REFERENCES
*
* Deutsch, L.P.,"DEFLATE Compressed Data Format Specification".
* Available in http://tools.ietf.org/html/rfc1951
* Available at https://datatracker.ietf.org/doc/html/rfc1951
*
* A description of the Rabin and Karp algorithm is given in the book
* "Algorithms" by R. Sedgewick, Addison-Wesley, p252.
@ -76,7 +76,7 @@
#include "deflate.h"
const char deflate_copyright[] =
" deflate 1.3.1 Copyright 1995-2024 Jean-loup Gailly and Mark Adler ";
" deflate 1.3.2 Copyright 1995-2026 Jean-loup Gailly and Mark Adler ";
/*
If you use the zlib library in a product, an acknowledgment is welcome
in the documentation of your product. If for some reason you cannot
@ -194,8 +194,8 @@ local const config configuration_table[10] = {
#define CLEAR_HASH(s) \
do { \
s->head[s->hash_size - 1] = NIL; \
zmemzero((Bytef *)s->head, \
(unsigned)(s->hash_size - 1)*sizeof(*s->head)); \
zmemzero(s->head, (unsigned)(s->hash_size - 1)*sizeof(*s->head)); \
s->slid = 0; \
} while (0)
/* ===========================================================================
@ -219,8 +219,8 @@ local void slide_hash(deflate_state *s) {
m = *--p;
*p = (Pos)(m >= wsize ? m - wsize : NIL);
} while (--n);
n = wsize;
#ifndef FASTEST
n = wsize;
p = &s->prev[n];
do {
m = *--p;
@ -230,6 +230,7 @@ local void slide_hash(deflate_state *s) {
*/
} while (--n);
#endif
s->slid = 1;
}
/* ===========================================================================
@ -283,7 +284,14 @@ local void fill_window(deflate_state *s) {
more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart);
/* Deal with !@#$% 64K limit: */
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4127)
#endif
if (sizeof(int) <= 2) {
#ifdef _MSC_VER
#pragma warning(pop)
#endif
if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
more = wsize;
@ -455,6 +463,7 @@ int ZEXPORT deflateInit2_(z_streamp strm, int level, int method,
if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */
s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state));
if (s == Z_NULL) return Z_MEM_ERROR;
zmemzero(s, sizeof(deflate_state));
strm->state = (struct internal_state FAR *)s;
s->strm = strm;
s->status = INIT_STATE; /* to pass state test in deflateReset() */
@ -736,10 +745,23 @@ int ZEXPORT deflateSetHeader(z_streamp strm, gz_headerp head) {
/* ========================================================================= */
int ZEXPORT deflatePending(z_streamp strm, unsigned *pending, int *bits) {
if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
if (pending != Z_NULL)
*pending = strm->state->pending;
if (bits != Z_NULL)
*bits = strm->state->bi_valid;
if (pending != Z_NULL) {
*pending = (unsigned)strm->state->pending;
if (*pending != strm->state->pending) {
*pending = (unsigned)-1;
return Z_BUF_ERROR;
}
}
return Z_OK;
}
/* ========================================================================= */
int ZEXPORT deflateUsed(z_streamp strm, int *bits) {
if (deflateStateCheck(strm)) return Z_STREAM_ERROR;
if (bits != Z_NULL)
*bits = strm->state->bi_used;
return Z_OK;
}
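
A hedged sketch of the deflateUsed() query added above, combined with the usual single-shot deflate() pattern; deflateBound() sizing is what makes the one-call Z_FINISH valid:

#include <stdio.h>
#include <string.h>
#include "zlib.h"

/* Compress src with one Z_FINISH call and report, via the deflateUsed()
   function added above, how many bits of the last output byte are used. */
int finish_and_report(const unsigned char *src, uLong len,
                      unsigned char *dst, uLong cap) {
    z_stream strm;
    int ret, bits;

    memset(&strm, 0, sizeof(strm));
    if (deflateInit(&strm, Z_DEFAULT_COMPRESSION) != Z_OK)
        return Z_MEM_ERROR;
    if (deflateBound(&strm, len) > cap) {
        deflateEnd(&strm);
        return Z_BUF_ERROR;
    }
    strm.next_in = (z_const Bytef *)src;
    strm.avail_in = (uInt)len;
    strm.next_out = dst;
    strm.avail_out = (uInt)cap;
    ret = deflate(&strm, Z_FINISH);
    if (ret == Z_STREAM_END && deflateUsed(&strm, &bits) == Z_OK)
        printf("last byte of output uses %d bits\n", bits);
    deflateEnd(&strm);
    return ret == Z_STREAM_END ? Z_OK : Z_BUF_ERROR;
}
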
@ -855,28 +877,34 @@ int ZEXPORT deflateTune(z_streamp strm, int good_length, int max_lazy,
*
* Shifts are used to approximate divisions, for speed.
*/
uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen) {
z_size_t ZEXPORT deflateBound_z(z_streamp strm, z_size_t sourceLen) {
deflate_state *s;
uLong fixedlen, storelen, wraplen;
z_size_t fixedlen, storelen, wraplen, bound;
/* upper bound for fixed blocks with 9-bit literals and length 255
(memLevel == 2, which is the lowest that may not use stored blocks) --
~13% overhead plus a small constant */
fixedlen = sourceLen + (sourceLen >> 3) + (sourceLen >> 8) +
(sourceLen >> 9) + 4;
if (fixedlen < sourceLen)
fixedlen = (z_size_t)-1;
/* upper bound for stored blocks with length 127 (memLevel == 1) --
~4% overhead plus a small constant */
storelen = sourceLen + (sourceLen >> 5) + (sourceLen >> 7) +
(sourceLen >> 11) + 7;
if (storelen < sourceLen)
storelen = (z_size_t)-1;
/* if can't get parameters, return larger bound plus a zlib wrapper */
if (deflateStateCheck(strm))
return (fixedlen > storelen ? fixedlen : storelen) + 6;
/* if can't get parameters, return larger bound plus a wrapper */
if (deflateStateCheck(strm)) {
bound = fixedlen > storelen ? fixedlen : storelen;
return bound + 18 < bound ? (z_size_t)-1 : bound + 18;
}
/* compute wrapper length */
s = strm->state;
switch (s->wrap) {
switch (s->wrap < 0 ? -s->wrap : s->wrap) {
case 0: /* raw deflate */
wraplen = 0;
break;
@ -906,18 +934,25 @@ uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen) {
break;
#endif
default: /* for compiler happiness */
wraplen = 6;
wraplen = 18;
}
/* if not default parameters, return one of the conservative bounds */
if (s->w_bits != 15 || s->hash_bits != 8 + 7)
return (s->w_bits <= s->hash_bits && s->level ? fixedlen : storelen) +
wraplen;
if (s->w_bits != 15 || s->hash_bits != 8 + 7) {
bound = s->w_bits <= s->hash_bits && s->level ? fixedlen :
storelen;
return bound + wraplen < bound ? (z_size_t)-1 : bound + wraplen;
}
/* default settings: return tight bound for that case -- ~0.03% overhead
plus a small constant */
return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
(sourceLen >> 25) + 13 - 6 + wraplen;
bound = sourceLen + (sourceLen >> 12) + (sourceLen >> 14) +
(sourceLen >> 25) + 13 - 6 + wraplen;
return bound < sourceLen ? (z_size_t)-1 : bound;
}
uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen) {
z_size_t bound = deflateBound_z(strm, sourceLen);
return (uLong)bound != bound ? (uLong)-1 : (uLong)bound;
}
/* =========================================================================
@ -941,8 +976,8 @@ local void flush_pending(z_streamp strm) {
deflate_state *s = strm->state;
_tr_flush_bits(s);
len = s->pending;
if (len > strm->avail_out) len = strm->avail_out;
len = s->pending > strm->avail_out ? strm->avail_out :
(unsigned)s->pending;
if (len == 0) return;
zmemcpy(strm->next_out, s->pending_out, len);
@ -962,8 +997,8 @@ local void flush_pending(z_streamp strm) {
#define HCRC_UPDATE(beg) \
do { \
if (s->gzhead->hcrc && s->pending > (beg)) \
strm->adler = crc32(strm->adler, s->pending_buf + (beg), \
s->pending - (beg)); \
strm->adler = crc32_z(strm->adler, s->pending_buf + (beg), \
s->pending - (beg)); \
} while (0)
/* ========================================================================= */
@ -1097,8 +1132,8 @@ int ZEXPORT deflate(z_streamp strm, int flush) {
put_byte(s, (s->gzhead->extra_len >> 8) & 0xff);
}
if (s->gzhead->hcrc)
strm->adler = crc32(strm->adler, s->pending_buf,
s->pending);
strm->adler = crc32_z(strm->adler, s->pending_buf,
s->pending);
s->gzindex = 0;
s->status = EXTRA_STATE;
}
@ -1106,9 +1141,9 @@ int ZEXPORT deflate(z_streamp strm, int flush) {
if (s->status == EXTRA_STATE) {
if (s->gzhead->extra != Z_NULL) {
ulg beg = s->pending; /* start of bytes to update crc */
uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex;
ulg left = (s->gzhead->extra_len & 0xffff) - s->gzindex;
while (s->pending + left > s->pending_buf_size) {
uInt copy = s->pending_buf_size - s->pending;
ulg copy = s->pending_buf_size - s->pending;
zmemcpy(s->pending_buf + s->pending,
s->gzhead->extra + s->gzindex, copy);
s->pending = s->pending_buf_size;
@ -1319,12 +1354,13 @@ int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) {
ss = source->state;
zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
zmemcpy(dest, source, sizeof(z_stream));
ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state));
if (ds == Z_NULL) return Z_MEM_ERROR;
zmemzero(ds, sizeof(deflate_state));
dest->state = (struct internal_state FAR *) ds;
zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state));
zmemcpy(ds, ss, sizeof(deflate_state));
ds->strm = dest;
ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte));
@ -1337,18 +1373,23 @@ int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) {
deflateEnd (dest);
return Z_MEM_ERROR;
}
/* following zmemcpy do not work for 16-bit MSDOS */
zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte));
zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos));
zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos));
zmemcpy(ds->pending_buf, ss->pending_buf, ds->lit_bufsize * LIT_BUFS);
/* following zmemcpy's do not work for 16-bit MSDOS */
zmemcpy(ds->window, ss->window, ss->high_water);
zmemcpy(ds->prev, ss->prev,
(ss->slid || ss->strstart - ss->insert > ds->w_size ? ds->w_size :
ss->strstart - ss->insert) * sizeof(Pos));
zmemcpy(ds->head, ss->head, ds->hash_size * sizeof(Pos));
ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf);
zmemcpy(ds->pending_out, ss->pending_out, ss->pending);
#ifdef LIT_MEM
ds->d_buf = (ushf *)(ds->pending_buf + (ds->lit_bufsize << 1));
ds->l_buf = ds->pending_buf + (ds->lit_bufsize << 2);
zmemcpy(ds->d_buf, ss->d_buf, ss->sym_next * sizeof(ush));
zmemcpy(ds->l_buf, ss->l_buf, ss->sym_next);
#else
ds->sym_buf = ds->pending_buf + ds->lit_bufsize;
zmemcpy(ds->sym_buf, ss->sym_buf, ss->sym_next);
#endif
ds->l_desc.dyn_tree = ds->dyn_ltree;
@ -1371,9 +1412,9 @@ int ZEXPORT deflateCopy(z_streamp dest, z_streamp source) {
*/
local uInt longest_match(deflate_state *s, IPos cur_match) {
unsigned chain_length = s->max_chain_length;/* max hash chain length */
register Bytef *scan = s->window + s->strstart; /* current string */
register Bytef *match; /* matched string */
register int len; /* length of current match */
Bytef *scan = s->window + s->strstart; /* current string */
Bytef *match; /* matched string */
int len; /* length of current match */
int best_len = (int)s->prev_length; /* best match length so far */
int nice_match = s->nice_match; /* stop if match long enough */
IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
@ -1388,13 +1429,13 @@ local uInt longest_match(deflate_state *s, IPos cur_match) {
/* Compare two bytes at a time. Note: this is not always beneficial.
* Try with and without -DUNALIGNED_OK to check.
*/
register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
register ush scan_start = *(ushf*)scan;
register ush scan_end = *(ushf*)(scan + best_len - 1);
Bytef *strend = s->window + s->strstart + MAX_MATCH - 1;
ush scan_start = *(ushf*)scan;
ush scan_end = *(ushf*)(scan + best_len - 1);
#else
register Bytef *strend = s->window + s->strstart + MAX_MATCH;
register Byte scan_end1 = scan[best_len - 1];
register Byte scan_end = scan[best_len];
Bytef *strend = s->window + s->strstart + MAX_MATCH;
Byte scan_end1 = scan[best_len - 1];
Byte scan_end = scan[best_len];
#endif
/* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
@ -1518,10 +1559,10 @@ local uInt longest_match(deflate_state *s, IPos cur_match) {
* Optimized version for FASTEST only
*/
local uInt longest_match(deflate_state *s, IPos cur_match) {
register Bytef *scan = s->window + s->strstart; /* current string */
register Bytef *match; /* matched string */
register int len; /* length of current match */
register Bytef *strend = s->window + s->strstart + MAX_MATCH;
Bytef *scan = s->window + s->strstart; /* current string */
Bytef *match; /* matched string */
int len; /* length of current match */
Bytef *strend = s->window + s->strstart + MAX_MATCH;
/* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
* It is easy to get rid of this optimization if necessary.
@ -1581,7 +1622,7 @@ local uInt longest_match(deflate_state *s, IPos cur_match) {
local void check_match(deflate_state *s, IPos start, IPos match, int length) {
/* check that the match is indeed a match */
Bytef *back = s->window + (int)match, *here = s->window + start;
IPos len = length;
IPos len = (IPos)length;
if (match == (IPos)-1) {
/* match starts one byte before the current window -- just compare the
subsequent length-1 bytes */
@ -1653,13 +1694,14 @@ local block_state deflate_stored(deflate_state *s, int flush) {
* this is 32K. This can be as small as 507 bytes for memLevel == 1. For
* large input and output buffers, the stored block size will be larger.
*/
unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size);
unsigned min_block = (unsigned)(MIN(s->pending_buf_size - 5, s->w_size));
/* Copy as many min_block or larger stored blocks directly to next_out as
* possible. If flushing, copy the remaining available input to next_out as
* stored blocks, if there is enough space.
*/
unsigned len, left, have, last = 0;
int last = 0;
unsigned len, left, have;
unsigned used = s->strm->avail_in;
do {
/* Set len to the maximum size block that we can copy directly with the
@ -1667,12 +1709,12 @@ local block_state deflate_stored(deflate_state *s, int flush) {
* would be copied from what's left in the window.
*/
len = MAX_STORED; /* maximum deflate stored block length */
have = (s->bi_valid + 42) >> 3; /* number of header bytes */
have = ((unsigned)s->bi_valid + 42) >> 3; /* bytes in header */
if (s->strm->avail_out < have) /* need room for header */
break;
/* maximum stored block length that will fit in avail_out: */
have = s->strm->avail_out - have;
left = s->strstart - s->block_start; /* bytes left in window */
left = (unsigned)(s->strstart - s->block_start); /* window bytes */
if (len > (ulg)left + s->strm->avail_in)
len = left + s->strm->avail_in; /* limit len to the input */
if (len > have)
@ -1695,10 +1737,10 @@ local block_state deflate_stored(deflate_state *s, int flush) {
_tr_stored_block(s, (char *)0, 0L, last);
/* Replace the lengths in the dummy stored block with len. */
s->pending_buf[s->pending - 4] = len;
s->pending_buf[s->pending - 3] = len >> 8;
s->pending_buf[s->pending - 2] = ~len;
s->pending_buf[s->pending - 1] = ~len >> 8;
s->pending_buf[s->pending - 4] = (Bytef)len;
s->pending_buf[s->pending - 3] = (Bytef)(len >> 8);
s->pending_buf[s->pending - 2] = (Bytef)~len;
s->pending_buf[s->pending - 1] = (Bytef)(~len >> 8);
/* Write the stored block header bytes. */
flush_pending(s->strm);
@ -1769,8 +1811,10 @@ local block_state deflate_stored(deflate_state *s, int flush) {
s->high_water = s->strstart;
/* If the last block was written to next_out, then done. */
if (last)
if (last) {
s->bi_used = 8;
return finish_done;
}
/* If flushing and all input has been consumed, then done. */
if (flush != Z_NO_FLUSH && flush != Z_FINISH &&
@ -1778,7 +1822,7 @@ local block_state deflate_stored(deflate_state *s, int flush) {
return block_done;
/* Fill the window with any remaining input. */
have = s->window_size - s->strstart;
have = (unsigned)(s->window_size - s->strstart);
if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) {
/* Slide the window down. */
s->block_start -= s->w_size;
@ -1805,11 +1849,11 @@ local block_state deflate_stored(deflate_state *s, int flush) {
* have enough input for a worthy block, or if flushing and there is enough
* room for the remaining input as a stored block in the pending buffer.
*/
have = (s->bi_valid + 42) >> 3; /* number of header bytes */
have = ((unsigned)s->bi_valid + 42) >> 3; /* bytes in header */
/* maximum stored block length that will fit in pending: */
have = MIN(s->pending_buf_size - have, MAX_STORED);
have = (unsigned)MIN(s->pending_buf_size - have, MAX_STORED);
min_block = MIN(have, s->w_size);
left = s->strstart - s->block_start;
left = (unsigned)(s->strstart - s->block_start);
if (left >= min_block ||
((left || flush == Z_FINISH) && flush != Z_NO_FLUSH &&
s->strm->avail_in == 0 && left <= have)) {
@ -1822,6 +1866,8 @@ local block_state deflate_stored(deflate_state *s, int flush) {
}
/* We've done all we can with the available input and output. */
if (last)
s->bi_used = 8;
return last ? finish_started : need_more;
}
@ -1870,7 +1916,7 @@ local block_state deflate_fast(deflate_state *s, int flush) {
/* longest_match() sets match_start */
}
if (s->match_length >= MIN_MATCH) {
check_match(s, s->strstart, s->match_start, s->match_length);
check_match(s, s->strstart, s->match_start, (int)s->match_length);
_tr_tally_dist(s, s->strstart - s->match_start,
s->match_length - MIN_MATCH, bflush);
@ -1992,7 +2038,7 @@ local block_state deflate_slow(deflate_state *s, int flush) {
uInt max_insert = s->strstart + s->lookahead - MIN_MATCH;
/* Do not insert strings in hash table beyond this. */
check_match(s, s->strstart - 1, s->prev_match, s->prev_length);
check_match(s, s->strstart - 1, s->prev_match, (int)s->prev_length);
_tr_tally_dist(s, s->strstart - 1 - s->prev_match,
s->prev_length - MIN_MATCH, bflush);
@ -2100,7 +2146,7 @@ local block_state deflate_rle(deflate_state *s, int flush) {
/* Emit match if have run of MIN_MATCH or longer, else emit literal */
if (s->match_length >= MIN_MATCH) {
check_match(s, s->strstart, s->strstart - 1, s->match_length);
check_match(s, s->strstart, s->strstart - 1, (int)s->match_length);
_tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush);

View File

@ -23,7 +23,7 @@
*/
/* deflate.h -- internal compression state
* Copyright (C) 1995-2024 Jean-loup Gailly
* Copyright (C) 1995-2026 Jean-loup Gailly
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -295,6 +295,9 @@ typedef struct internal_state {
/* Number of valid bits in bi_buf. All bits above the last valid bit
* are always zero.
*/
int bi_used;
/* Last number of used bits when going to a byte boundary.
*/
ulg high_water;
/* High water mark offset in window for initialized bytes -- bytes above
@ -303,6 +306,9 @@ typedef struct internal_state {
* updated to the new high water mark.
*/
int slid;
/* True if the hash table has been slid since it was cleared. */
} FAR deflate_state;
/* Output a byte on the stream.

View File

@ -23,7 +23,7 @@
*/
/* gzguts.h -- zlib internal header definitions for gz* operations
* Copyright (C) 2004-2024 Mark Adler
* Copyright (C) 2004-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -41,6 +41,18 @@
# define ZLIB_INTERNAL
#endif
#if defined(_WIN32)
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
# endif
# ifndef _CRT_NONSTDC_NO_DEPRECATE
# define _CRT_NONSTDC_NO_DEPRECATE
# endif
#endif
#include <stdio.h>
#include "zlib.h"
#ifdef STDC
@ -49,8 +61,8 @@
# include <limits.h>
#endif
#ifndef _POSIX_SOURCE
# define _POSIX_SOURCE
#ifndef _POSIX_C_SOURCE
# define _POSIX_C_SOURCE 200112L
#endif
#include <fcntl.h>
@ -60,19 +72,13 @@
#if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32)
# include <io.h>
# include <sys/stat.h>
#endif
#if defined(_WIN32)
#if defined(_WIN32) && !defined(WIDECHAR)
# define WIDECHAR
#endif
#ifdef WINAPI_FAMILY
# define open _open
# define read _read
# define write _write
# define close _close
#endif
#ifdef NO_DEFLATE /* for compatibility with old definition */
# define NO_GZCOMPRESS
#endif
@ -96,33 +102,28 @@
#endif
#ifndef HAVE_VSNPRINTF
# ifdef MSDOS
# if !defined(NO_vsnprintf) && \
(defined(MSDOS) || defined(__TURBOC__) || defined(__SASC) || \
defined(VMS) || defined(__OS400) || defined(__MVS__))
/* vsnprintf may exist on some MS-DOS compilers (DJGPP?),
but for now we just assume it doesn't. */
# define NO_vsnprintf
# endif
# ifdef __TURBOC__
# define NO_vsnprintf
# endif
# ifdef WIN32
/* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. */
# if !defined(vsnprintf) && !defined(NO_vsnprintf)
# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 )
# define vsnprintf _vsnprintf
# if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 )
# ifndef vsnprintf
# define vsnprintf _vsnprintf
# endif
# endif
# endif
# ifdef __SASC
# define NO_vsnprintf
# endif
# ifdef VMS
# define NO_vsnprintf
# endif
# ifdef __OS400__
# define NO_vsnprintf
# endif
# ifdef __MVS__
# define NO_vsnprintf
# elif !defined(__STDC_VERSION__) || __STDC_VERSION__-0 < 199901L
/* Otherwise if C89/90, assume no C99 snprintf() or vsnprintf() */
# ifndef NO_snprintf
# define NO_snprintf
# endif
# ifndef NO_vsnprintf
# define NO_vsnprintf
# endif
# endif
#endif
@ -206,7 +207,9 @@ typedef struct {
unsigned char *out; /* output buffer (double-sized when reading) */
int direct; /* 0 if processing gzip, 1 if transparent */
/* just for reading */
int junk; /* -1 = start, 1 = junk candidate, 0 = in gzip */
int how; /* 0: get header, 1: copy, 2: decompress */
int again; /* true if EAGAIN or EWOULDBLOCK on last i/o */
z_off64_t start; /* where the gzip data started, for rewinding */
int eof; /* true if end of input file reached */
int past; /* true if read requested past end */
@ -216,7 +219,6 @@ typedef struct {
int reset; /* true if a reset is pending after a Z_FINISH */
/* seek request */
z_off64_t skip; /* amount to skip (already rewound if backwards) */
int seek; /* true if seek request pending */
/* error information */
int err; /* error code */
char *msg; /* error message */

View File

@ -23,21 +23,21 @@
*/
/* gzlib.c -- zlib functions common to reading and writing gzip files
* Copyright (C) 2004-2024 Mark Adler
* Copyright (C) 2004-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "gzguts.h"
#if defined(_WIN32) && !defined(__BORLANDC__)
#if defined(__DJGPP__)
# define LSEEK llseek
#elif defined(_WIN32) && !defined(__BORLANDC__) && !defined(UNDER_CE)
# define LSEEK _lseeki64
#else
#if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0
#elif defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0
# define LSEEK lseek64
#else
# define LSEEK lseek
#endif
#endif
#if defined UNDER_CE
@ -76,7 +76,7 @@ char ZLIB_INTERNAL *gz_strwinerror(DWORD error) {
msgbuf[chars] = 0;
}
wcstombs(buf, msgbuf, chars + 1);
wcstombs(buf, msgbuf, chars + 1); /* assumes buf is big enough */
LocalFree(msgbuf);
}
else {
@ -96,10 +96,12 @@ local void gz_reset(gz_statep state) {
state->eof = 0; /* not at end of file */
state->past = 0; /* have not read past end yet */
state->how = LOOK; /* look for gzip header */
state->junk = -1; /* mark first member */
}
else /* for writing ... */
state->reset = 0; /* no deflateReset pending */
state->seek = 0; /* no seek request pending */
state->again = 0; /* no stalled i/o yet */
state->skip = 0; /* no seek request pending */
gz_error(state, Z_OK, NULL); /* clear error */
state->x.pos = 0; /* no uncompressed data yet */
state->strm.avail_in = 0; /* no input data yet */
@ -109,16 +111,13 @@ local void gz_reset(gz_statep state) {
local gzFile gz_open(const void *path, int fd, const char *mode) {
gz_statep state;
z_size_t len;
int oflag;
#ifdef O_CLOEXEC
int cloexec = 0;
#endif
int oflag = 0;
#ifdef O_EXCL
int exclusive = 0;
#endif
/* check input */
if (path == NULL)
if (path == NULL || mode == NULL)
return NULL;
/* allocate gzFile structure to return */
@ -127,6 +126,7 @@ local gzFile gz_open(const void *path, int fd, const char *mode) {
return NULL;
state->size = 0; /* no buffers allocated yet */
state->want = GZBUFSIZE; /* requested buffer size */
state->err = Z_OK; /* no error yet */
state->msg = NULL; /* no error message yet */
/* interpret mode */
@ -157,7 +157,7 @@ local gzFile gz_open(const void *path, int fd, const char *mode) {
break;
#ifdef O_CLOEXEC
case 'e':
cloexec = 1;
oflag |= O_CLOEXEC;
break;
#endif
#ifdef O_EXCL
@ -177,6 +177,14 @@ local gzFile gz_open(const void *path, int fd, const char *mode) {
case 'F':
state->strategy = Z_FIXED;
break;
case 'G':
state->direct = -1;
break;
#ifdef O_NONBLOCK
case 'N':
oflag |= O_NONBLOCK;
break;
#endif
case 'T':
state->direct = 1;
break;
@ -192,22 +200,30 @@ local gzFile gz_open(const void *path, int fd, const char *mode) {
return NULL;
}
/* can't force transparent read */
/* direct is 0, 1 if "T", or -1 if "G" (last "G" or "T" wins) */
if (state->mode == GZ_READ) {
if (state->direct) {
if (state->direct == 1) {
/* can't force a transparent read */
free(state);
return NULL;
}
state->direct = 1; /* for empty file */
if (state->direct == 0)
/* default when reading is auto-detect of gzip vs. transparent --
start with a transparent assumption in case of an empty file */
state->direct = 1;
}
else if (state->direct == -1) {
/* "G" has no meaning when writing -- disallow it */
free(state);
return NULL;
}
/* if reading, direct == 1 for auto-detect, -1 for gzip only; if writing or
appending, direct == 0 for gzip, 1 for transparent (copy in to out) */
/* save the path name for error messages */
#ifdef WIDECHAR
if (fd == -2) {
if (fd == -2)
len = wcstombs(NULL, path, 0);
if (len == (z_size_t)-1)
len = 0;
}
else
#endif
len = strlen((const char *)path);
@ -217,29 +233,29 @@ local gzFile gz_open(const void *path, int fd, const char *mode) {
return NULL;
}
#ifdef WIDECHAR
if (fd == -2)
if (fd == -2) {
if (len)
wcstombs(state->path, path, len + 1);
else
*(state->path) = 0;
}
else
#endif
{
#if !defined(NO_snprintf) && !defined(NO_vsnprintf)
(void)snprintf(state->path, len + 1, "%s", (const char *)path);
#else
strcpy(state->path, path);
#endif
}
/* compute the flags for open() */
oflag =
oflag |=
#ifdef O_LARGEFILE
O_LARGEFILE |
#endif
#ifdef O_BINARY
O_BINARY |
#endif
#ifdef O_CLOEXEC
(cloexec ? O_CLOEXEC : 0) |
#endif
(state->mode == GZ_READ ?
O_RDONLY :
@ -252,11 +268,23 @@ local gzFile gz_open(const void *path, int fd, const char *mode) {
O_APPEND)));
/* open the file with the appropriate flags (or just use fd) */
state->fd = fd > -1 ? fd : (
if (fd == -1)
state->fd = open((const char *)path, oflag, 0666);
#ifdef WIDECHAR
fd == -2 ? _wopen(path, oflag, 0666) :
else if (fd == -2)
state->fd = _wopen(path, oflag, _S_IREAD | _S_IWRITE);
#endif
open((const char *)path, oflag, 0666));
else {
#ifdef O_NONBLOCK
if (oflag & O_NONBLOCK)
fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_NONBLOCK);
#endif
#ifdef O_CLOEXEC
if (oflag & O_CLOEXEC)
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | O_CLOEXEC);
#endif
state->fd = fd;
}
if (state->fd == -1) {
free(state->path);
free(state);
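
A hedged sketch of the new mode letters parsed above: "G" (added in this patch) forces gzip decoding with no transparent fallback, and "N", where O_NONBLOCK exists, opens the descriptor non-blocking.

#include "zlib.h"

int count_uncompressed(const char *path) {
    gzFile f = gzopen(path, "rbG");  /* gzip only -- no transparent reads */
    char buf[4096];
    int n, total = 0;

    if (f == NULL)
        return -1;
    while ((n = gzread(f, buf, sizeof(buf))) > 0)
        total += n;
    gzclose(f);
    return n < 0 ? -1 : total;       /* non-gzip input errors out, per "G" */
}
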
@ -383,9 +411,10 @@ z_off64_t ZEXPORT gzseek64(gzFile file, z_off64_t offset, int whence) {
/* normalize offset to a SEEK_CUR specification */
if (whence == SEEK_SET)
offset -= state->x.pos;
else if (state->seek)
offset += state->skip;
state->seek = 0;
else {
offset += state->past ? 0 : state->skip;
state->skip = 0;
}
/* if within raw area while reading, just go there */
if (state->mode == GZ_READ && state->how == COPY &&
@ -396,7 +425,7 @@ z_off64_t ZEXPORT gzseek64(gzFile file, z_off64_t offset, int whence) {
state->x.have = 0;
state->eof = 0;
state->past = 0;
state->seek = 0;
state->skip = 0;
gz_error(state, Z_OK, NULL);
state->strm.avail_in = 0;
state->x.pos += offset;
@ -425,10 +454,7 @@ z_off64_t ZEXPORT gzseek64(gzFile file, z_off64_t offset, int whence) {
}
/* request skip (if not zero) */
if (offset) {
state->seek = 1;
state->skip = offset;
}
state->skip = offset;
return state->x.pos + offset;
}
@ -452,7 +478,7 @@ z_off64_t ZEXPORT gztell64(gzFile file) {
return -1;
/* return position */
return state->x.pos + (state->seek ? state->skip : 0);
return state->x.pos + (state->past ? 0 : state->skip);
}
/* -- see zlib.h -- */
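
A hedged sketch of the deferred-seek behavior above: gzseek() now only records state->skip, gztell() folds the pending skip into the reported position, and the skip runs on the next read.

#include <stdio.h>
#include "zlib.h"

void peek_at(const char *path, z_off_t off) {
    gzFile f = gzopen(path, "rb");
    int c;

    if (f == NULL)
        return;
    if (gzseek(f, off, SEEK_SET) != -1) {       /* deferred: no i/o yet */
        printf("reported position: %ld\n", (long)gztell(f));
        c = gzgetc(f);                          /* the skip executes here */
        printf("byte at %ld: %d\n", (long)off, c);
    }
    gzclose(f);
}
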
@ -559,7 +585,7 @@ void ZLIB_INTERNAL gz_error(gz_statep state, int err, const char *msg) {
}
/* if fatal, set state->x.have to 0 so that the gzgetc() macro fails */
if (err != Z_OK && err != Z_BUF_ERROR)
if (err != Z_OK && err != Z_BUF_ERROR && !state->again)
state->x.have = 0;
/* set error code, and if no message, then done */
@ -596,6 +622,7 @@ unsigned ZLIB_INTERNAL gz_intmax(void) {
return INT_MAX;
#else
unsigned p = 1, q;
do {
q = p;
p <<= 1;

View File

@ -23,7 +23,7 @@
*/
/* gzread.c -- zlib functions for reading gzip files
* Copyright (C) 2004-2017 Mark Adler
* Copyright (C) 2004-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -32,23 +32,36 @@
/* Use read() to load a buffer -- return -1 on error, otherwise 0. Read from
state->fd, and update state->eof, state->err, and state->msg as appropriate.
This function needs to loop on read(), since read() is not guaranteed to
read the number of bytes requested, depending on the type of descriptor. */
read the number of bytes requested, depending on the type of descriptor. It
also needs to loop to manage the fact that read() returns an int. If the
descriptor is non-blocking and read() returns with no data in order to avoid
blocking, then gz_load() will return 0 if some data has been read, or -1 if
no data has been read. Either way, state->again is set true to indicate a
non-blocking event. If errno is non-zero on return, then there was an error
signaled from read(). *have is set to the number of bytes read. */
local int gz_load(gz_statep state, unsigned char *buf, unsigned len,
unsigned *have) {
int ret;
unsigned get, max = ((unsigned)-1 >> 2) + 1;
state->again = 0;
errno = 0;
*have = 0;
do {
get = len - *have;
if (get > max)
get = max;
ret = read(state->fd, buf + *have, get);
ret = (int)read(state->fd, buf + *have, get);
if (ret <= 0)
break;
*have += (unsigned)ret;
} while (*have < len);
if (ret < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
state->again = 1;
if (*have != 0)
return 0;
}
gz_error(state, Z_ERRNO, zstrerror());
return -1;
}
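
A hedged sketch of consuming a non-blocking descriptor under the gz_load() semantics described above; it assumes a stall surfaces to the caller as -1 from gzread() with errno still EAGAIN/EWOULDBLOCK, and that the descriptor was already set non-blocking (or opened with the "N" mode letter). POSIX poll() is used for the wait.

#include <errno.h>
#include <poll.h>
#include "zlib.h"

int drain(int fd) {
    gzFile f = gzdopen(fd, "rb");
    char buf[4096];
    int n, total = 0;

    if (f == NULL)
        return -1;
    for (;;) {
        n = gzread(f, buf, sizeof(buf));
        if (n > 0) { total += n; continue; }
        if (n == 0) break;                       /* end of stream */
        if (errno == EAGAIN || errno == EWOULDBLOCK) {
            struct pollfd p;
            p.fd = fd; p.events = POLLIN; p.revents = 0;
            (void)poll(&p, 1, -1);               /* wait for more input */
            continue;                            /* stall cleared: retry */
        }
        total = -1;                              /* a real error */
        break;
    }
    gzclose(f);
    return total;
}
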
@ -74,10 +87,14 @@ local int gz_avail(gz_statep state) {
if (strm->avail_in) { /* copy what's there to the start */
unsigned char *p = state->in;
unsigned const char *q = strm->next_in;
unsigned n = strm->avail_in;
do {
*p++ = *q++;
} while (--n);
if (q != p) {
unsigned n = strm->avail_in;
do {
*p++ = *q++;
} while (--n);
}
}
if (gz_load(state, state->in + strm->avail_in,
state->size - strm->avail_in, &got) == -1)
@ -128,39 +145,44 @@ local int gz_look(gz_statep state) {
}
}
/* get at least the magic bytes in the input buffer */
if (strm->avail_in < 2) {
if (gz_avail(state) == -1)
return -1;
if (strm->avail_in == 0)
return 0;
}
/* look for gzip magic bytes -- if there, do gzip decoding (note: there is
a logical dilemma here when considering the case of a partially written
gzip file, to wit, if a single 31 byte is written, then we cannot tell
whether this is a single-byte file, or just a partially written gzip
file -- for here we assume that if a gzip file is being written, then
the header will be written in a single operation, so that reading a
single byte is sufficient indication that it is not a gzip file) */
if (strm->avail_in > 1 &&
strm->next_in[0] == 31 && strm->next_in[1] == 139) {
/* if transparent reading is disabled, which would only be at the start, or
if we're looking for a gzip member after the first one, which is not at
the start, then proceed directly to look for a gzip member next */
if (state->direct == -1 || state->junk == 0) {
inflateReset(strm);
state->how = GZIP;
state->junk = state->junk != -1;
state->direct = 0;
return 0;
}
/* no gzip header -- if we were decoding gzip before, then this is trailing
garbage. Ignore the trailing garbage and finish. */
if (state->direct == 0) {
strm->avail_in = 0;
state->eof = 1;
state->x.have = 0;
/* otherwise we're at the start with auto-detect -- we check to see if the
first four bytes could be gzip header in order to decide whether or not
this will be a transparent read */
/* load any header bytes into the input buffer -- if the input is empty,
then it's not an error as this is a transparent read of zero bytes */
if (gz_avail(state) == -1)
return -1;
if (strm->avail_in == 0 || (state->again && strm->avail_in < 4))
/* if non-blocking input stalled before getting four bytes, then
return and wait until a later call has accumulated enough */
return 0;
/* see if this is (likely) gzip input -- if the first four bytes are
consistent with a gzip header, then go look for the first gzip member,
otherwise proceed to copy the input transparently */
if (strm->avail_in > 3 &&
strm->next_in[0] == 31 && strm->next_in[1] == 139 &&
strm->next_in[2] == 8 && strm->next_in[3] < 32) {
inflateReset(strm);
state->how = GZIP;
state->junk = 1;
state->direct = 0;
return 0;
}
/* doing raw i/o, copy any leftover input to output -- this assumes that
/* doing raw i/o: copy any leftover input to output -- this assumes that
the output buffer is larger than the input buffer, which also assures
space for gzungetc() */
state->x.next = state->out;
@ -168,15 +190,17 @@ local int gz_look(gz_statep state) {
state->x.have = strm->avail_in;
strm->avail_in = 0;
state->how = COPY;
state->direct = 1;
return 0;
}
/* Decompress from input to the provided next_out and avail_out in the state.
On return, state->x.have and state->x.next point to the just decompressed
data. If the gzip stream completes, state->how is reset to LOOK to look for
the next gzip stream or raw data, once state->x.have is depleted. Returns 0
on success, -1 on failure. */
data. If the gzip stream completes, state->how is reset to LOOK to look for
the next gzip stream or raw data, once state->x.have is depleted. Returns 0
on success, -1 on failure. If EOF is reached when looking for more input to
complete the gzip member, then an unexpected end of file error is raised.
If there is no more input, but state->again is true, then EOF has not been
reached, and no error is raised. */
local int gz_decomp(gz_statep state) {
int ret = Z_OK;
unsigned had;
@ -186,28 +210,41 @@ local int gz_decomp(gz_statep state) {
had = strm->avail_out;
do {
/* get more input for inflate() */
if (strm->avail_in == 0 && gz_avail(state) == -1)
return -1;
if (strm->avail_in == 0 && gz_avail(state) == -1) {
ret = state->err;
break;
}
if (strm->avail_in == 0) {
gz_error(state, Z_BUF_ERROR, "unexpected end of file");
if (!state->again)
gz_error(state, Z_BUF_ERROR, "unexpected end of file");
break;
}
/* decompress and handle errors */
ret = inflate(strm, Z_NO_FLUSH);
if (strm->avail_out < had)
/* any decompressed data marks this as a real gzip stream */
state->junk = 0;
if (ret == Z_STREAM_ERROR || ret == Z_NEED_DICT) {
gz_error(state, Z_STREAM_ERROR,
"internal error: inflate stream corrupt");
return -1;
break;
}
if (ret == Z_MEM_ERROR) {
gz_error(state, Z_MEM_ERROR, "out of memory");
return -1;
break;
}
if (ret == Z_DATA_ERROR) { /* deflate stream invalid */
if (state->junk == 1) { /* trailing garbage is ok */
strm->avail_in = 0;
state->eof = 1;
state->how = LOOK;
ret = Z_OK;
break;
}
gz_error(state, Z_DATA_ERROR,
strm->msg == NULL ? "compressed data error" : strm->msg);
return -1;
break;
}
} while (strm->avail_out && ret != Z_STREAM_END);
@ -216,11 +253,14 @@ local int gz_decomp(gz_statep state) {
state->x.next = strm->next_out - state->x.have;
/* if the gzip stream completed successfully, look for another */
if (ret == Z_STREAM_END)
if (ret == Z_STREAM_END) {
state->junk = 0;
state->how = LOOK;
return 0;
}
/* good decompression */
return 0;
/* return decompression status */
return ret != Z_OK ? -1 : 0;
}
/* Fetch data and put it in the output buffer. Assumes state->x.have is 0.
@ -251,25 +291,31 @@ local int gz_fetch(gz_statep state) {
strm->next_out = state->out;
if (gz_decomp(state) == -1)
return -1;
break;
default:
gz_error(state, Z_STREAM_ERROR, "state corrupt");
return -1;
}
} while (state->x.have == 0 && (!state->eof || strm->avail_in));
return 0;
}
/* Skip len uncompressed bytes of output. Return -1 on error, 0 on success. */
local int gz_skip(gz_statep state, z_off64_t len) {
/* Skip state->skip (> 0) uncompressed bytes of output. Return -1 on error, 0
on success. */
local int gz_skip(gz_statep state) {
unsigned n;
/* skip over len bytes or reach end-of-file, whichever comes first */
while (len)
do {
/* skip over whatever is in output buffer */
if (state->x.have) {
n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > len ?
(unsigned)len : state->x.have;
n = GT_OFF(state->x.have) ||
(z_off64_t)state->x.have > state->skip ?
(unsigned)state->skip : state->x.have;
state->x.have -= n;
state->x.next += n;
state->x.pos += n;
len -= n;
state->skip -= n;
}
/* output buffer empty -- return if we're at the end of the input */
@ -282,30 +328,32 @@ local int gz_skip(gz_statep state, z_off64_t len) {
if (gz_fetch(state) == -1)
return -1;
}
} while (state->skip);
return 0;
}
/* Read len bytes into buf from file, or less than len up to the end of the
input. Return the number of bytes read. If zero is returned, either the
end of file was reached, or there was an error. state->err must be
consulted in that case to determine which. */
input. Return the number of bytes read. If zero is returned, either the end
of file was reached, or there was an error. state->err must be consulted in
that case to determine which. If there was an error, but some uncompressed
bytes were read before the error, then that count is returned. The error is
still recorded, and so is deferred until the next call. */
local z_size_t gz_read(gz_statep state, voidp buf, z_size_t len) {
z_size_t got;
unsigned n;
int err;
/* if len is zero, avoid unnecessary operations */
if (len == 0)
return 0;
/* process a skip request */
if (state->seek) {
state->seek = 0;
if (gz_skip(state, state->skip) == -1)
return 0;
}
if (state->skip && gz_skip(state) == -1)
return 0;
/* get len bytes to buf, or less than len if at the end */
got = 0;
err = 0;
do {
/* set n to the maximum amount of len that fits in an unsigned int */
n = (unsigned)-1;
@ -319,37 +367,36 @@ local z_size_t gz_read(gz_statep state, voidp buf, z_size_t len) {
memcpy(buf, state->x.next, n);
state->x.next += n;
state->x.have -= n;
if (state->err != Z_OK)
/* caught deferred error from gz_fetch() */
err = -1;
}
/* output buffer empty -- return if we're at the end of the input */
else if (state->eof && state->strm.avail_in == 0) {
state->past = 1; /* tried to read past end */
else if (state->eof && state->strm.avail_in == 0)
break;
}
/* need output data -- for small len or new stream load up our output
buffer */
buffer, so that gzgetc() can be fast */
else if (state->how == LOOK || n < (state->size << 1)) {
/* get more output, looking for header if required */
if (gz_fetch(state) == -1)
return 0;
if (gz_fetch(state) == -1 && state->x.have == 0)
/* if state->x.have != 0, error will be caught after copy */
err = -1;
continue; /* no progress yet -- go back to copy above */
/* the copy above assures that we will leave with space in the
output buffer, allowing at least one gzungetc() to succeed */
}
/* large len -- read directly into user buffer */
else if (state->how == COPY) { /* read directly */
if (gz_load(state, (unsigned char *)buf, n, &n) == -1)
return 0;
}
else if (state->how == COPY) /* read directly */
err = gz_load(state, (unsigned char *)buf, n, &n);
/* large len -- decompress directly into user buffer */
else { /* state->how == GZIP */
state->strm.avail_out = n;
state->strm.next_out = (unsigned char *)buf;
if (gz_decomp(state) == -1)
return 0;
err = gz_decomp(state);
n = state->x.have;
state->x.have = 0;
}
@ -359,7 +406,11 @@ local z_size_t gz_read(gz_statep state, voidp buf, z_size_t len) {
buf = (char *)buf + n;
got += n;
state->x.pos += n;
} while (len);
} while (len && !err);
/* note read past eof */
if (len && state->eof)
state->past = 1;
/* return number of bytes read into user buffer */
return got;
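
A hedged sketch of the deferred-error contract described above: a read that produced bytes before failing still returns that count, and the recorded error only surfaces on the following call.

#include <stdio.h>
#include "zlib.h"

long read_all(gzFile f, FILE *out) {
    char buf[4096];
    long total = 0;
    int n, err;

    while ((n = gzread(f, buf, sizeof(buf))) > 0) {
        fwrite(buf, 1, (size_t)n, out);
        total += n;              /* partial counts before an error land here */
    }
    if (n < 0) {
        fprintf(stderr, "gzread: %s\n", gzerror(f, &err));
        return -1;               /* the deferred error, reported one call late */
    }
    return total;                /* n == 0: clean end of file */
}
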
@ -369,16 +420,18 @@ local z_size_t gz_read(gz_statep state, voidp buf, z_size_t len) {
int ZEXPORT gzread(gzFile file, voidp buf, unsigned len) {
gz_statep state;
/* get internal structure */
/* get internal structure and check that it's for reading */
if (file == NULL)
return -1;
state = (gz_statep)file;
/* check that we're reading and that there's no (serious) error */
if (state->mode != GZ_READ ||
(state->err != Z_OK && state->err != Z_BUF_ERROR))
if (state->mode != GZ_READ)
return -1;
/* check that there was no (serious) error */
if (state->err != Z_OK && state->err != Z_BUF_ERROR && !state->again)
return -1;
gz_error(state, Z_OK, NULL);
/* since an int is returned, make sure len fits in one, otherwise return
with an error (this avoids a flaw in the interface) */
if ((int)len < 0) {
@ -390,28 +443,40 @@ int ZEXPORT gzread(gzFile file, voidp buf, unsigned len) {
len = (unsigned)gz_read(state, buf, len);
/* check for an error */
if (len == 0 && state->err != Z_OK && state->err != Z_BUF_ERROR)
return -1;
if (len == 0) {
if (state->err != Z_OK && state->err != Z_BUF_ERROR)
return -1;
if (state->again) {
/* non-blocking input stalled after some input was read, but no
uncompressed bytes were produced -- let the application know
this isn't EOF */
gz_error(state, Z_ERRNO, zstrerror());
return -1;
}
}
/* return the number of bytes read (this is assured to fit in an int) */
/* return the number of bytes read */
return (int)len;
}
/* -- see zlib.h -- */
z_size_t ZEXPORT gzfread(voidp buf, z_size_t size, z_size_t nitems, gzFile file) {
z_size_t ZEXPORT gzfread(voidp buf, z_size_t size, z_size_t nitems,
gzFile file) {
z_size_t len;
gz_statep state;
/* get internal structure */
/* get internal structure and check that it's for reading */
if (file == NULL)
return 0;
state = (gz_statep)file;
/* check that we're reading and that there's no (serious) error */
if (state->mode != GZ_READ ||
(state->err != Z_OK && state->err != Z_BUF_ERROR))
if (state->mode != GZ_READ)
return 0;
/* check that there was no (serious) error */
if (state->err != Z_OK && state->err != Z_BUF_ERROR && !state->again)
return 0;
gz_error(state, Z_OK, NULL);
/* compute bytes to read -- error on overflow */
len = nitems * size;
if (size && len / size != nitems) {
@ -433,16 +498,18 @@ int ZEXPORT gzgetc(gzFile file) {
unsigned char buf[1];
gz_statep state;
/* get internal structure */
/* get internal structure and check that it's for reading */
if (file == NULL)
return -1;
state = (gz_statep)file;
/* check that we're reading and that there's no (serious) error */
if (state->mode != GZ_READ ||
(state->err != Z_OK && state->err != Z_BUF_ERROR))
if (state->mode != GZ_READ)
return -1;
/* check that there was no (serious) error */
if (state->err != Z_OK && state->err != Z_BUF_ERROR && !state->again)
return -1;
gz_error(state, Z_OK, NULL);
/* try output buffer (no need to check for skip request) */
if (state->x.have) {
state->x.have--;
@ -462,26 +529,25 @@ int ZEXPORT gzgetc_(gzFile file) {
int ZEXPORT gzungetc(int c, gzFile file) {
gz_statep state;
/* get internal structure */
/* get internal structure and check that it's for reading */
if (file == NULL)
return -1;
state = (gz_statep)file;
/* in case this was just opened, set up the input buffer */
if (state->mode == GZ_READ && state->how == LOOK && state->x.have == 0)
(void)gz_look(state);
/* check that we're reading and that there's no (serious) error */
if (state->mode != GZ_READ ||
(state->err != Z_OK && state->err != Z_BUF_ERROR))
if (state->mode != GZ_READ)
return -1;
/* in case this was just opened, set up the input buffer */
if (state->how == LOOK && state->x.have == 0)
(void)gz_look(state);
/* check that there was no (serious) error */
if (state->err != Z_OK && state->err != Z_BUF_ERROR && !state->again)
return -1;
gz_error(state, Z_OK, NULL);
/* process a skip request */
if (state->seek) {
state->seek = 0;
if (gz_skip(state, state->skip) == -1)
return -1;
}
if (state->skip && gz_skip(state) == -1)
return -1;
/* can't push EOF */
if (c < 0)
@ -507,6 +573,7 @@ int ZEXPORT gzungetc(int c, gzFile file) {
if (state->x.next == state->out) {
unsigned char *src = state->out + state->x.have;
unsigned char *dest = state->out + (state->size << 1);
while (src > state->out)
*--dest = *--src;
state->x.next = dest;
@ -526,32 +593,31 @@ char * ZEXPORT gzgets(gzFile file, char *buf, int len) {
unsigned char *eol;
gz_statep state;
/* check parameters and get internal structure */
/* check parameters, get internal structure, and check that it's for
reading */
if (file == NULL || buf == NULL || len < 1)
return NULL;
state = (gz_statep)file;
/* check that we're reading and that there's no (serious) error */
if (state->mode != GZ_READ ||
(state->err != Z_OK && state->err != Z_BUF_ERROR))
if (state->mode != GZ_READ)
return NULL;
/* process a skip request */
if (state->seek) {
state->seek = 0;
if (gz_skip(state, state->skip) == -1)
return NULL;
}
/* check that there was no (serious) error */
if (state->err != Z_OK && state->err != Z_BUF_ERROR && !state->again)
return NULL;
gz_error(state, Z_OK, NULL);
/* copy output bytes up to new line or len - 1, whichever comes first --
append a terminating zero to the string (we don't check for a zero in
the contents, let the user worry about that) */
/* process a skip request */
if (state->skip && gz_skip(state) == -1)
return NULL;
/* copy output up to a new line, len-1 bytes, or there is no more output,
whichever comes first */
str = buf;
left = (unsigned)len - 1;
if (left) do {
/* assure that something is in the output buffer */
if (state->x.have == 0 && gz_fetch(state) == -1)
return NULL; /* error */
break; /* error */
if (state->x.have == 0) { /* end of file */
state->past = 1; /* read past end */
break; /* return what we have */
@ -572,7 +638,9 @@ char * ZEXPORT gzgets(gzFile file, char *buf, int len) {
buf += n;
} while (left && eol == NULL);
/* return terminated string, or if nothing, end of file */
/* append a terminating zero to the string (we don't check for a zero in
the contents, let the user worry about that) -- return the terminated
string, or if nothing was read, NULL */
if (buf == str)
return NULL;
buf[0] = 0;
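
A hedged sketch of gzgets() under the termination rules above: NULL only when nothing at all was read, otherwise a zero-terminated chunk ending at a newline, at len - 1 bytes, or at end of input.

#include <stdio.h>
#include "zlib.h"

void print_lines(const char *path) {
    gzFile f = gzopen(path, "rb");
    char line[1024];

    if (f == NULL)
        return;
    while (gzgets(f, line, (int)sizeof(line)) != NULL)
        fputs(line, stdout);     /* line keeps its newline, if one was read */
    gzclose(f);
}
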
@ -594,7 +662,7 @@ int ZEXPORT gzdirect(gzFile file) {
(void)gz_look(state);
/* return 1 if transparent, 0 if processing a gzip stream */
return state->direct;
return state->direct == 1;
}
/* -- see zlib.h -- */
@ -602,12 +670,10 @@ int ZEXPORT gzclose_r(gzFile file) {
int ret, err;
gz_statep state;
/* get internal structure */
/* get internal structure and check that it's for reading */
if (file == NULL)
return Z_STREAM_ERROR;
state = (gz_statep)file;
/* check that we're reading */
if (state->mode != GZ_READ)
return Z_STREAM_ERROR;

View File

@ -23,7 +23,7 @@
*/
/* gzwrite.c -- zlib functions for writing gzip files
* Copyright (C) 2004-2019 Mark Adler
* Copyright (C) 2004-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -98,9 +98,13 @@ local int gz_comp(gz_statep state, int flush) {
/* write directly if requested */
if (state->direct) {
while (strm->avail_in) {
errno = 0;
state->again = 0;
put = strm->avail_in > max ? max : strm->avail_in;
writ = write(state->fd, strm->next_in, put);
writ = (int)write(state->fd, strm->next_in, put);
if (writ < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
state->again = 1;
gz_error(state, Z_ERRNO, zstrerror());
return -1;
}
@ -112,8 +116,9 @@ local int gz_comp(gz_statep state, int flush) {
/* check for a pending reset */
if (state->reset) {
/* don't start a new gzip member unless there is data to write */
if (strm->avail_in == 0)
/* don't start a new gzip member unless there is data to write and
we're not flushing */
if (strm->avail_in == 0 && flush == Z_NO_FLUSH)
return 0;
deflateReset(strm);
state->reset = 0;
@ -127,10 +132,14 @@ local int gz_comp(gz_statep state, int flush) {
if (strm->avail_out == 0 || (flush != Z_NO_FLUSH &&
(flush != Z_FINISH || ret == Z_STREAM_END))) {
while (strm->next_out > state->x.next) {
errno = 0;
state->again = 0;
put = strm->next_out - state->x.next > (int)max ? max :
(unsigned)(strm->next_out - state->x.next);
writ = write(state->fd, state->x.next, put);
writ = (int)write(state->fd, state->x.next, put);
if (writ < 0) {
if (errno == EAGAIN || errno == EWOULDBLOCK)
state->again = 1;
gz_error(state, Z_ERRNO, zstrerror());
return -1;
}
@ -162,10 +171,12 @@ local int gz_comp(gz_statep state, int flush) {
return 0;
}
/* Compress len zeros to output. Return -1 on a write error or memory
allocation failure by gz_comp(), or 0 on success. */
local int gz_zero(gz_statep state, z_off64_t len) {
int first;
/* Compress state->skip (> 0) zeros to output. Return -1 on a write error or
memory allocation failure by gz_comp(), or 0 on success. state->skip is
updated with the number of successfully written zeros, in case there is a
stall on a non-blocking write destination. */
local int gz_zero(gz_statep state) {
int first, ret;
unsigned n;
z_streamp strm = &(state->strm);
@ -173,29 +184,34 @@ local int gz_zero(gz_statep state, z_off64_t len) {
if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1)
return -1;
/* compress len zeros (len guaranteed > 0) */
/* compress state->skip zeros */
first = 1;
while (len) {
n = GT_OFF(state->size) || (z_off64_t)state->size > len ?
(unsigned)len : state->size;
do {
n = GT_OFF(state->size) || (z_off64_t)state->size > state->skip ?
(unsigned)state->skip : state->size;
if (first) {
memset(state->in, 0, n);
first = 0;
}
strm->avail_in = n;
strm->next_in = state->in;
ret = gz_comp(state, Z_NO_FLUSH);
n -= strm->avail_in;
state->x.pos += n;
if (gz_comp(state, Z_NO_FLUSH) == -1)
state->skip -= n;
if (ret == -1)
return -1;
len -= n;
}
} while (state->skip);
return 0;
}
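
The skip path above is what services gzseek() on a file opened for writing: the requested gap is queued in state->skip and synthesized as compressed zeros by gz_zero() before the next write. A minimal sketch of the observable effect through the public API (file name illustrative, error checks trimmed):

#include "zlib.h"

/* Write two records separated by a 4096-byte hole of zeros (sketch). */
int write_with_gap(const char *path) {
    gzFile g = gzopen(path, "wb");
    if (g == NULL)
        return -1;
    gzwrite(g, "head", 4);
    gzseek(g, 4096, SEEK_CUR);   /* queued as a skip; the zeros are
                                    compressed by gz_zero() on the next
                                    write */
    gzwrite(g, "tail", 4);
    return gzclose_w(g);         /* also flushes any still-pending skip */
}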
/* Write len bytes from buf to file. Return the number of bytes written. If
the returned value is less than len, then there was an error. */
the returned value is less than len, then there was an error. If the error
was a non-blocking stall, then the number of bytes consumed is returned.
For any other error, 0 is returned. */
local z_size_t gz_write(gz_statep state, voidpc buf, z_size_t len) {
z_size_t put = len;
int ret;
/* if len is zero, avoid unnecessary operations */
if (len == 0)
@ -206,16 +222,13 @@ local z_size_t gz_write(gz_statep state, voidpc buf, z_size_t len) {
return 0;
/* check for seek request */
if (state->seek) {
state->seek = 0;
if (gz_zero(state, state->skip) == -1)
return 0;
}
if (state->skip && gz_zero(state) == -1)
return 0;
/* for small len, copy to input buffer, otherwise compress directly */
if (len < state->size) {
/* copy to input buffer, compress when full */
do {
for (;;) {
unsigned have, copy;
if (state->strm.avail_in == 0)
@ -230,9 +243,11 @@ local z_size_t gz_write(gz_statep state, voidpc buf, z_size_t len) {
state->x.pos += copy;
buf = (const char *)buf + copy;
len -= copy;
if (len && gz_comp(state, Z_NO_FLUSH) == -1)
return 0;
} while (len);
if (len == 0)
break;
if (gz_comp(state, Z_NO_FLUSH) == -1)
return state->again ? put - len : 0;
}
}
else {
/* consume whatever's left in the input buffer */
@ -243,13 +258,16 @@ local z_size_t gz_write(gz_statep state, voidpc buf, z_size_t len) {
state->strm.next_in = (z_const Bytef *)buf;
do {
unsigned n = (unsigned)-1;
if (n > len)
n = (unsigned)len;
state->strm.avail_in = n;
ret = gz_comp(state, Z_NO_FLUSH);
n -= state->strm.avail_in;
state->x.pos += n;
if (gz_comp(state, Z_NO_FLUSH) == -1)
return 0;
len -= n;
if (ret == -1)
return state->again ? put - len : 0;
} while (len);
}
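
With the stall handling above, gzwrite() on a non-blocking descriptor can return a short count with errno left at EAGAIN, and the retained state lets the next call continue where the stall occurred. An application-level retry loop might look like the following sketch (POSIX poll() assumed; the caller is assumed to also hold the raw descriptor):

#include <errno.h>
#include <poll.h>
#include "zlib.h"

/* Write all of buf, waiting out EAGAIN stalls on a non-blocking fd (sketch). */
int gzwrite_all(gzFile g, int fd, const char *buf, unsigned len) {
    unsigned done = 0;
    while (done < len) {
        int n = gzwrite(g, buf + done, len - done);
        if (n > 0)
            done += (unsigned)n;
        if (done == len)
            break;
        if (errno != EAGAIN && errno != EWOULDBLOCK)
            return -1;                  /* hard error, not a stall */
        struct pollfd p;
        p.fd = fd;
        p.events = POLLOUT;
        poll(&p, 1, -1);                /* wait until writable, then retry */
    }
    return 0;
}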
@ -266,9 +284,10 @@ int ZEXPORT gzwrite(gzFile file, voidpc buf, unsigned len) {
return 0;
state = (gz_statep)file;
/* check that we're writing and that there's no error */
if (state->mode != GZ_WRITE || state->err != Z_OK)
/* check that we're writing and that there's no (serious) error */
if (state->mode != GZ_WRITE || (state->err != Z_OK && !state->again))
return 0;
gz_error(state, Z_OK, NULL);
/* since an int is returned, make sure len fits in one, otherwise return
with an error (this avoids a flaw in the interface) */
@ -292,9 +311,10 @@ z_size_t ZEXPORT gzfwrite(voidpc buf, z_size_t size, z_size_t nitems,
return 0;
state = (gz_statep)file;
/* check that we're writing and that there's no error */
if (state->mode != GZ_WRITE || state->err != Z_OK)
/* check that we're writing and that there's no (serious) error */
if (state->mode != GZ_WRITE || (state->err != Z_OK && !state->again))
return 0;
gz_error(state, Z_OK, NULL);
/* compute bytes to read -- error on overflow */
len = nitems * size;
@ -320,16 +340,14 @@ int ZEXPORT gzputc(gzFile file, int c) {
state = (gz_statep)file;
strm = &(state->strm);
/* check that we're writing and that there's no error */
if (state->mode != GZ_WRITE || state->err != Z_OK)
/* check that we're writing and that there's no (serious) error */
if (state->mode != GZ_WRITE || (state->err != Z_OK && !state->again))
return -1;
gz_error(state, Z_OK, NULL);
/* check for seek request */
if (state->seek) {
state->seek = 0;
if (gz_zero(state, state->skip) == -1)
return -1;
}
if (state->skip && gz_zero(state) == -1)
return -1;
/* try writing to input buffer for speed (state->size == 0 if buffer not
initialized) */
@ -362,9 +380,10 @@ int ZEXPORT gzputs(gzFile file, const char *s) {
return -1;
state = (gz_statep)file;
/* check that we're writing and that there's no error */
if (state->mode != GZ_WRITE || state->err != Z_OK)
/* check that we're writing and that there's no (serious) error */
if (state->mode != GZ_WRITE || (state->err != Z_OK && !state->again))
return -1;
gz_error(state, Z_OK, NULL);
/* write string */
len = strlen(s);
@ -373,16 +392,47 @@ int ZEXPORT gzputs(gzFile file, const char *s) {
return -1;
}
put = gz_write(state, s, len);
return put < len ? -1 : (int)len;
return len && put == 0 ? -1 : (int)put;
}
#if (((!defined(STDC) && !defined(Z_HAVE_STDARG_H)) || !defined(NO_vsnprintf)) && \
(defined(STDC) || defined(Z_HAVE_STDARG_H) || !defined(NO_snprintf))) || \
defined(ZLIB_INSECURE)
/* If the second half of the input buffer is occupied, write out the contents.
If there is any input remaining due to a non-blocking stall on write, move
it to the start of the buffer. Return true if this did not open up the
second half of the buffer. state->err should be checked after this to
handle a gz_comp() error. */
local int gz_vacate(gz_statep state) {
z_streamp strm;
strm = &(state->strm);
if (strm->next_in + strm->avail_in <= state->in + state->size)
return 0;
(void)gz_comp(state, Z_NO_FLUSH);
if (strm->avail_in == 0) {
strm->next_in = state->in;
return 0;
}
memmove(state->in, strm->next_in, strm->avail_in);
strm->next_in = state->in;
return strm->avail_in > state->size;
}
#endif
#if defined(STDC) || defined(Z_HAVE_STDARG_H)
#include <stdarg.h>
/* -- see zlib.h -- */
int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va) {
int len;
unsigned left;
#if defined(NO_vsnprintf) && !defined(ZLIB_INSECURE)
#warning "vsnprintf() not available -- gzprintf() stub returns Z_STREAM_ERROR"
#warning "you can recompile with ZLIB_INSECURE defined to use vsprintf()"
/* prevent use of insecure vsprintf(), unless purposefully requested */
(void)file, (void)format, (void)va;
return Z_STREAM_ERROR;
#else
int len, ret;
char *next;
gz_statep state;
z_streamp strm;
@ -393,24 +443,34 @@ int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va) {
state = (gz_statep)file;
strm = &(state->strm);
/* check that we're writing and that there's no error */
if (state->mode != GZ_WRITE || state->err != Z_OK)
/* check that we're writing and that there's no (serious) error */
if (state->mode != GZ_WRITE || (state->err != Z_OK && !state->again))
return Z_STREAM_ERROR;
gz_error(state, Z_OK, NULL);
/* make sure we have some buffer space */
if (state->size == 0 && gz_init(state) == -1)
return state->err;
/* check for seek request */
if (state->seek) {
state->seek = 0;
if (gz_zero(state, state->skip) == -1)
return state->err;
}
if (state->skip && gz_zero(state) == -1)
return state->err;
/* do the printf() into the input buffer, put length in len -- the input
buffer is double-sized just for this function, so there is guaranteed to
be state->size bytes available after the current contents */
buffer is double-sized just for this function, so there should be
state->size bytes available after the current contents */
ret = gz_vacate(state);
if (state->err) {
if (ret && state->again) {
/* There was a non-blocking stall on write, resulting in part
of the second half of the input buffer being occupied. Return
a Z_BUF_ERROR to let the application know that this gzprintf()
needs to be retried. */
gz_error(state, Z_BUF_ERROR, "stalled write on gzprintf");
}
if (!state->again)
return state->err;
}
if (strm->avail_in == 0)
strm->next_in = state->in;
next = (char *)(state->in + (strm->next_in - state->in) + strm->avail_in);
@ -436,19 +496,16 @@ int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va) {
if (len == 0 || (unsigned)len >= state->size || next[state->size - 1] != 0)
return 0;
/* update buffer and position, compress first half if past that */
/* update buffer and position */
strm->avail_in += (unsigned)len;
state->x.pos += len;
if (strm->avail_in >= state->size) {
left = strm->avail_in - state->size;
strm->avail_in = state->size;
if (gz_comp(state, Z_NO_FLUSH) == -1)
return state->err;
memmove(state->in, state->in + state->size, left);
strm->next_in = state->in;
strm->avail_in = left;
}
/* write out buffer if more than half is occupied */
ret = gz_vacate(state);
if (state->err && !state->again)
return state->err;
return len;
#endif
}
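
Unlike gzwrite(), gzprintf() cannot report a partial count, so a stalled non-blocking write surfaces as Z_BUF_ERROR and the same call should simply be repeated once the descriptor drains. For example (sketch; wait_writable() is a hypothetical helper that polls the raw fd for POLLOUT):

/* Emit one formatted line, retrying across non-blocking stalls (sketch). */
int log_line(gzFile g, int fd, const char *msg) {
    int ret;
    while ((ret = gzprintf(g, "%s\n", msg)) == Z_BUF_ERROR)
        wait_writable(fd);      /* hypothetical: poll fd for POLLOUT */
    return ret;                 /* bytes written, or a zlib error code */
}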
int ZEXPORTVA gzprintf(gzFile file, const char *format, ...) {
@ -468,6 +525,17 @@ int ZEXPORTVA gzprintf(gzFile file, const char *format, int a1, int a2, int a3,
int a4, int a5, int a6, int a7, int a8, int a9, int a10,
int a11, int a12, int a13, int a14, int a15, int a16,
int a17, int a18, int a19, int a20) {
#if defined(NO_snprintf) && !defined(ZLIB_INSECURE)
#warning "snprintf() not available -- gzprintf() stub returns Z_STREAM_ERROR"
#warning "you can recompile with ZLIB_INSECURE defined to use sprintf()"
/* prevent use of insecure sprintf(), unless purposefully requested */
(void)file, (void)format, (void)a1, (void)a2, (void)a3, (void)a4, (void)a5,
(void)a6, (void)a7, (void)a8, (void)a9, (void)a10, (void)a11, (void)a12,
(void)a13, (void)a14, (void)a15, (void)a16, (void)a17, (void)a18,
(void)a19, (void)a20;
return Z_STREAM_ERROR;
#else
int ret;
unsigned len, left;
char *next;
gz_statep state;
@ -483,24 +551,34 @@ int ZEXPORTVA gzprintf(gzFile file, const char *format, int a1, int a2, int a3,
if (sizeof(int) != sizeof(void *))
return Z_STREAM_ERROR;
/* check that we're writing and that there's no error */
if (state->mode != GZ_WRITE || state->err != Z_OK)
/* check that we're writing and that there's no (serious) error */
if (state->mode != GZ_WRITE || (state->err != Z_OK && !state->again))
return Z_STREAM_ERROR;
gz_error(state, Z_OK, NULL);
/* make sure we have some buffer space */
if (state->size == 0 && gz_init(state) == -1)
return state->error;
return state->err;
/* check for seek request */
if (state->seek) {
state->seek = 0;
if (gz_zero(state, state->skip) == -1)
return state->error;
}
if (state->skip && gz_zero(state) == -1)
return state->err;
/* do the printf() into the input buffer, put length in len -- the input
buffer is double-sized just for this function, so there is guaranteed to
be state->size bytes available after the current contents */
ret = gz_vacate(state);
if (state->err) {
if (ret && state->again) {
/* There was a non-blocking stall on write, resulting in part
of the second half of the input buffer being occupied. Return
a Z_BUF_ERROR to let the application know that this gzprintf()
needs to be retried. */
gz_error(state, Z_BUF_ERROR, "stalled write on gzprintf");
}
if (!state->again)
return state->err;
}
if (strm->avail_in == 0)
strm->next_in = state->in;
next = (char *)(strm->next_in + strm->avail_in);
@ -534,16 +612,13 @@ int ZEXPORTVA gzprintf(gzFile file, const char *format, int a1, int a2, int a3,
/* update buffer and position, compress first half if past that */
strm->avail_in += len;
state->x.pos += len;
if (strm->avail_in >= state->size) {
left = strm->avail_in - state->size;
strm->avail_in = state->size;
if (gz_comp(state, Z_NO_FLUSH) == -1)
return state->err;
memmove(state->in, state->in + state->size, left);
strm->next_in = state->in;
strm->avail_in = left;
}
/* write out buffer if more than half is occupied */
ret = gz_vacate(state);
if (state->err && !state->again)
return state->err;
return (int)len;
#endif
}
#endif
@ -557,20 +632,18 @@ int ZEXPORT gzflush(gzFile file, int flush) {
return Z_STREAM_ERROR;
state = (gz_statep)file;
/* check that we're writing and that there's no error */
if (state->mode != GZ_WRITE || state->err != Z_OK)
/* check that we're writing and that there's no (serious) error */
if (state->mode != GZ_WRITE || (state->err != Z_OK && !state->again))
return Z_STREAM_ERROR;
gz_error(state, Z_OK, NULL);
/* check flush parameter */
if (flush < 0 || flush > Z_FINISH)
return Z_STREAM_ERROR;
/* check for seek request */
if (state->seek) {
state->seek = 0;
if (gz_zero(state, state->skip) == -1)
return state->err;
}
if (state->skip && gz_zero(state) == -1)
return state->err;
/* compress remaining data with requested flush */
(void)gz_comp(state, flush);
@ -588,20 +661,19 @@ int ZEXPORT gzsetparams(gzFile file, int level, int strategy) {
state = (gz_statep)file;
strm = &(state->strm);
/* check that we're writing and that there's no error */
if (state->mode != GZ_WRITE || state->err != Z_OK || state->direct)
/* check that we're compressing and that there's no (serious) error */
if (state->mode != GZ_WRITE || (state->err != Z_OK && !state->again) ||
state->direct)
return Z_STREAM_ERROR;
gz_error(state, Z_OK, NULL);
/* if no change is requested, then do nothing */
if (level == state->level && strategy == state->strategy)
return Z_OK;
/* check for seek request */
if (state->seek) {
state->seek = 0;
if (gz_zero(state, state->skip) == -1)
return state->err;
}
if (state->skip && gz_zero(state) == -1)
return state->err;
/* change compression parameters for subsequent input */
if (state->size) {
@ -630,11 +702,8 @@ int ZEXPORT gzclose_w(gzFile file) {
return Z_STREAM_ERROR;
/* check for seek request */
if (state->seek) {
state->seek = 0;
if (gz_zero(state, state->skip) == -1)
ret = state->err;
}
if (state->skip && gz_zero(state) == -1)
ret = state->err;
/* flush, free memory, and close file */
if (gz_comp(state, Z_FINISH) == -1)

View File

@ -23,7 +23,7 @@
*/
/* infback.c -- inflate using a call-back interface
* Copyright (C) 1995-2022 Mark Adler
* Copyright (C) 1995-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -70,7 +70,7 @@ int ZEXPORT inflateBackInit_(z_streamp strm, int windowBits,
#ifdef Z_SOLO
return Z_STREAM_ERROR;
#else
strm->zfree = zcfree;
strm->zfree = zcfree;
#endif
state = (struct inflate_state FAR *)ZALLOC(strm, 1,
sizeof(struct inflate_state));
@ -87,57 +87,6 @@ int ZEXPORT inflateBackInit_(z_streamp strm, int windowBits,
return Z_OK;
}
/*
Return state with length and distance decoding tables and index sizes set to
fixed code decoding. Normally this returns fixed tables from inffixed.h.
If BUILDFIXED is defined, then instead this routine builds the tables the
first time it's called, and returns those tables the first time and
thereafter. This reduces the size of the code by about 2K bytes, in
exchange for a little execution time. However, BUILDFIXED should not be
used for threaded applications, since the rewriting of the tables and virgin
may not be thread-safe.
*/
local void fixedtables(struct inflate_state FAR *state) {
#ifdef BUILDFIXED
static int virgin = 1;
static code *lenfix, *distfix;
static code fixed[544];
/* build fixed huffman tables if first call (may not be thread safe) */
if (virgin) {
unsigned sym, bits;
static code *next;
/* literal/length table */
sym = 0;
while (sym < 144) state->lens[sym++] = 8;
while (sym < 256) state->lens[sym++] = 9;
while (sym < 280) state->lens[sym++] = 7;
while (sym < 288) state->lens[sym++] = 8;
next = fixed;
lenfix = next;
bits = 9;
inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work);
/* distance table */
sym = 0;
while (sym < 32) state->lens[sym++] = 5;
distfix = next;
bits = 5;
inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work);
/* do this just once */
virgin = 0;
}
#else /* !BUILDFIXED */
# include "inffixed.h"
#endif /* BUILDFIXED */
state->lencode = lenfix;
state->lenbits = 9;
state->distcode = distfix;
state->distbits = 5;
}
/* Macros for inflateBack(): */
/* Load returned state from inflate_fast() */
@ -317,7 +266,7 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
state->mode = STORED;
break;
case 1: /* fixed block */
fixedtables(state);
inflate_fixed(state);
Tracev((stderr, "inflate: fixed codes block%s\n",
state->last ? " (last)" : ""));
state->mode = LEN; /* decode codes */
@ -327,8 +276,8 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
state->last ? " (last)" : ""));
state->mode = TABLE;
break;
case 3:
strm->msg = (char *)"invalid block type";
default:
strm->msg = (z_const char *)"invalid block type";
state->mode = BAD;
}
DROPBITS(2);
@ -339,7 +288,7 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
BYTEBITS(); /* go to byte boundary */
NEEDBITS(32);
if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
strm->msg = (char *)"invalid stored block lengths";
strm->msg = (z_const char *)"invalid stored block lengths";
state->mode = BAD;
break;
}
@ -377,7 +326,8 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
DROPBITS(4);
#ifndef PKZIP_BUG_WORKAROUND
if (state->nlen > 286 || state->ndist > 30) {
strm->msg = (char *)"too many length or distance symbols";
strm->msg = (z_const char *)
"too many length or distance symbols";
state->mode = BAD;
break;
}
@ -399,7 +349,7 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
ret = inflate_table(CODES, state->lens, 19, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid code lengths set";
strm->msg = (z_const char *)"invalid code lengths set";
state->mode = BAD;
break;
}
@ -422,7 +372,8 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
NEEDBITS(here.bits + 2);
DROPBITS(here.bits);
if (state->have == 0) {
strm->msg = (char *)"invalid bit length repeat";
strm->msg = (z_const char *)
"invalid bit length repeat";
state->mode = BAD;
break;
}
@ -445,7 +396,8 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
DROPBITS(7);
}
if (state->have + copy > state->nlen + state->ndist) {
strm->msg = (char *)"invalid bit length repeat";
strm->msg = (z_const char *)
"invalid bit length repeat";
state->mode = BAD;
break;
}
@ -459,7 +411,8 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
/* check for end-of-block code (better have one) */
if (state->lens[256] == 0) {
strm->msg = (char *)"invalid code -- missing end-of-block";
strm->msg = (z_const char *)
"invalid code -- missing end-of-block";
state->mode = BAD;
break;
}
@ -473,7 +426,7 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid literal/lengths set";
strm->msg = (z_const char *)"invalid literal/lengths set";
state->mode = BAD;
break;
}
@ -482,7 +435,7 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
&(state->next), &(state->distbits), state->work);
if (ret) {
strm->msg = (char *)"invalid distances set";
strm->msg = (z_const char *)"invalid distances set";
state->mode = BAD;
break;
}
@ -541,7 +494,7 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
/* invalid code */
if (here.op & 64) {
strm->msg = (char *)"invalid literal/length code";
strm->msg = (z_const char *)"invalid literal/length code";
state->mode = BAD;
break;
}
@ -573,7 +526,7 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
}
DROPBITS(here.bits);
if (here.op & 64) {
strm->msg = (char *)"invalid distance code";
strm->msg = (z_const char *)"invalid distance code";
state->mode = BAD;
break;
}
@ -588,7 +541,7 @@ int ZEXPORT inflateBack(z_streamp strm, in_func in, void FAR *in_desc,
}
if (state->offset > state->wsize - (state->whave < state->wsize ?
left : 0)) {
strm->msg = (char *)"invalid distance too far back";
strm->msg = (z_const char *)"invalid distance too far back";
state->mode = BAD;
break;
}

View File

@ -23,7 +23,7 @@
*/
/* inffast.c -- fast decoding
* Copyright (C) 1995-2017 Mark Adler
* Copyright (C) 1995-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -179,7 +179,8 @@ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start) {
dist += (unsigned)hold & ((1U << op) - 1);
#ifdef INFLATE_STRICT
if (dist > dmax) {
strm->msg = (char *)"invalid distance too far back";
strm->msg = (z_const char *)
"invalid distance too far back";
state->mode = BAD;
break;
}
@ -192,8 +193,8 @@ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start) {
op = dist - op; /* distance back in window */
if (op > whave) {
if (state->sane) {
strm->msg =
(char *)"invalid distance too far back";
strm->msg = (z_const char *)
"invalid distance too far back";
state->mode = BAD;
break;
}
@ -289,7 +290,7 @@ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start) {
goto dodist;
}
else {
strm->msg = (char *)"invalid distance code";
strm->msg = (z_const char *)"invalid distance code";
state->mode = BAD;
break;
}
@ -304,7 +305,7 @@ void ZLIB_INTERNAL inflate_fast(z_streamp strm, unsigned start) {
break;
}
else {
strm->msg = (char *)"invalid literal/length code";
strm->msg = (z_const char *)"invalid literal/length code";
state->mode = BAD;
break;
}

View File

@ -22,97 +22,97 @@
* questions.
*/
/* inffixed.h -- table for decoding fixed codes
* Generated automatically by makefixed().
*/
/* inffixed.h -- table for decoding fixed codes
* Generated automatically by makefixed().
*/
/* WARNING: this file should *not* be used by applications.
It is part of the implementation of this library and is
subject to change. Applications should only use zlib.h.
*/
/* WARNING: this file should *not* be used by applications.
It is part of the implementation of this library and is
subject to change. Applications should only use zlib.h.
*/
static const code lenfix[512] = {
{96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48},
{0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128},
{0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59},
{0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176},
{0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20},
{21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100},
{0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8},
{0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216},
{18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76},
{0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114},
{0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2},
{0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148},
{20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42},
{0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86},
{0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15},
{0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236},
{16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62},
{0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142},
{0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31},
{0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162},
{0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25},
{0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105},
{0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4},
{0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202},
{17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69},
{0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125},
{0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13},
{0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195},
{19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35},
{0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91},
{0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19},
{0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246},
{16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55},
{0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135},
{0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99},
{0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190},
{0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16},
{20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96},
{0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6},
{0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209},
{17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72},
{0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116},
{0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4},
{0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153},
{20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44},
{0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82},
{0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11},
{0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229},
{16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58},
{0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138},
{0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51},
{0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173},
{0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30},
{0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110},
{0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0},
{0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195},
{16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65},
{0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121},
{0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9},
{0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258},
{19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37},
{0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93},
{0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23},
{0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251},
{16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51},
{0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131},
{0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67},
{0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183},
{0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23},
{64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103},
{0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9},
{0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223},
{18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79},
{0,9,255}
};
static const code lenfix[512] = {
{96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48},
{0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128},
{0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59},
{0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176},
{0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20},
{21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100},
{0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8},
{0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216},
{18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76},
{0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114},
{0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2},
{0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148},
{20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42},
{0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86},
{0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15},
{0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236},
{16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62},
{0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142},
{0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31},
{0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162},
{0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25},
{0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105},
{0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4},
{0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202},
{17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69},
{0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125},
{0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13},
{0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195},
{19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35},
{0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91},
{0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19},
{0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246},
{16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55},
{0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135},
{0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99},
{0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190},
{0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16},
{20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96},
{0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6},
{0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209},
{17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72},
{0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116},
{0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4},
{0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153},
{20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44},
{0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82},
{0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11},
{0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229},
{16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58},
{0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138},
{0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51},
{0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173},
{0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30},
{0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110},
{0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0},
{0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195},
{16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65},
{0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121},
{0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9},
{0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258},
{19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37},
{0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93},
{0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23},
{0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251},
{16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51},
{0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131},
{0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67},
{0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183},
{0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23},
{64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103},
{0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9},
{0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223},
{18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79},
{0,9,255}
};
static const code distfix[32] = {
{16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025},
{21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193},
{18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385},
{19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577},
{16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073},
{22,5,193},{64,5,0}
};
static const code distfix[32] = {
{16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025},
{21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193},
{18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385},
{19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577},
{16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073},
{22,5,193},{64,5,0}
};

View File

@ -23,7 +23,7 @@
*/
/* inflate.c -- zlib decompression
* Copyright (C) 1995-2022 Mark Adler
* Copyright (C) 1995-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -109,12 +109,6 @@
#include "inflate.h"
#include "inffast.h"
#ifdef MAKEFIXED
# ifndef BUILDFIXED
# define BUILDFIXED
# endif
#endif
local int inflateStateCheck(z_streamp strm) {
struct inflate_state FAR *state;
if (strm == Z_NULL ||
@ -134,6 +128,7 @@ int ZEXPORT inflateResetKeep(z_streamp strm) {
state = (struct inflate_state FAR *)strm->state;
strm->total_in = strm->total_out = state->total = 0;
strm->msg = Z_NULL;
strm->data_type = 0;
if (state->wrap) /* to support ill-conceived Java test suite */
strm->adler = state->wrap & 1;
state->mode = HEAD;
@ -226,6 +221,7 @@ int ZEXPORT inflateInit2_(z_streamp strm, int windowBits,
state = (struct inflate_state FAR *)
ZALLOC(strm, 1, sizeof(struct inflate_state));
if (state == Z_NULL) return Z_MEM_ERROR;
zmemzero(state, sizeof(struct inflate_state));
Tracev((stderr, "inflate: allocated\n"));
strm->state = (struct internal_state FAR *)state;
state->strm = strm;
@ -258,123 +254,11 @@ int ZEXPORT inflatePrime(z_streamp strm, int bits, int value) {
}
if (bits > 16 || state->bits + (uInt)bits > 32) return Z_STREAM_ERROR;
value &= (1L << bits) - 1;
state->hold += (unsigned)value << state->bits;
state->hold += (unsigned long)value << state->bits;
state->bits += (uInt)bits;
return Z_OK;
}
/*
Return state with length and distance decoding tables and index sizes set to
fixed code decoding. Normally this returns fixed tables from inffixed.h.
If BUILDFIXED is defined, then instead this routine builds the tables the
first time it's called, and returns those tables the first time and
thereafter. This reduces the size of the code by about 2K bytes, in
exchange for a little execution time. However, BUILDFIXED should not be
used for threaded applications, since the rewriting of the tables and virgin
may not be thread-safe.
*/
local void fixedtables(struct inflate_state FAR *state) {
#ifdef BUILDFIXED
static int virgin = 1;
static code *lenfix, *distfix;
static code fixed[544];
/* build fixed huffman tables if first call (may not be thread safe) */
if (virgin) {
unsigned sym, bits;
static code *next;
/* literal/length table */
sym = 0;
while (sym < 144) state->lens[sym++] = 8;
while (sym < 256) state->lens[sym++] = 9;
while (sym < 280) state->lens[sym++] = 7;
while (sym < 288) state->lens[sym++] = 8;
next = fixed;
lenfix = next;
bits = 9;
inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work);
/* distance table */
sym = 0;
while (sym < 32) state->lens[sym++] = 5;
distfix = next;
bits = 5;
inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work);
/* do this just once */
virgin = 0;
}
#else /* !BUILDFIXED */
# include "inffixed.h"
#endif /* BUILDFIXED */
state->lencode = lenfix;
state->lenbits = 9;
state->distcode = distfix;
state->distbits = 5;
}
#ifdef MAKEFIXED
#include <stdio.h>
/*
Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also
defines BUILDFIXED, so the tables are built on the fly. makefixed() writes
those tables to stdout, which would be piped to inffixed.h. A small program
can simply call makefixed to do this:
void makefixed(void);
int main(void)
{
makefixed();
return 0;
}
Then that can be linked with zlib built with MAKEFIXED defined and run:
a.out > inffixed.h
*/
void makefixed(void)
{
unsigned low, size;
struct inflate_state state;
fixedtables(&state);
puts(" /* inffixed.h -- table for decoding fixed codes");
puts(" * Generated automatically by makefixed().");
puts(" */");
puts("");
puts(" /* WARNING: this file should *not* be used by applications.");
puts(" It is part of the implementation of this library and is");
puts(" subject to change. Applications should only use zlib.h.");
puts(" */");
puts("");
size = 1U << 9;
printf(" static const code lenfix[%u] = {", size);
low = 0;
for (;;) {
if ((low % 7) == 0) printf("\n ");
printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op,
state.lencode[low].bits, state.lencode[low].val);
if (++low == size) break;
putchar(',');
}
puts("\n };");
size = 1U << 5;
printf("\n static const code distfix[%u] = {", size);
low = 0;
for (;;) {
if ((low % 6) == 0) printf("\n ");
printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits,
state.distcode[low].val);
if (++low == size) break;
putchar(',');
}
puts("\n };");
}
#endif /* MAKEFIXED */
/*
Update the window with the last wsize (normally 32K) bytes written before
returning. If window does not exist yet, create it. This is only called
@ -666,12 +550,12 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
if (
#endif
((BITS(8) << 8) + (hold >> 8)) % 31) {
strm->msg = (char *)"incorrect header check";
strm->msg = (z_const char *)"incorrect header check";
state->mode = BAD;
break;
}
if (BITS(4) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
strm->msg = (z_const char *)"unknown compression method";
state->mode = BAD;
break;
}
@ -680,7 +564,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
if (state->wbits == 0)
state->wbits = len;
if (len > 15 || len > state->wbits) {
strm->msg = (char *)"invalid window size";
strm->msg = (z_const char *)"invalid window size";
state->mode = BAD;
break;
}
@ -696,12 +580,12 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
NEEDBITS(16);
state->flags = (int)(hold);
if ((state->flags & 0xff) != Z_DEFLATED) {
strm->msg = (char *)"unknown compression method";
strm->msg = (z_const char *)"unknown compression method";
state->mode = BAD;
break;
}
if (state->flags & 0xe000) {
strm->msg = (char *)"unknown header flags set";
strm->msg = (z_const char *)"unknown header flags set";
state->mode = BAD;
break;
}
@ -817,7 +701,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
if (state->flags & 0x0200) {
NEEDBITS(16);
if ((state->wrap & 4) && hold != (state->check & 0xffff)) {
strm->msg = (char *)"header crc mismatch";
strm->msg = (z_const char *)"header crc mismatch";
state->mode = BAD;
break;
}
@ -864,7 +748,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
state->mode = STORED;
break;
case 1: /* fixed block */
fixedtables(state);
inflate_fixed(state);
Tracev((stderr, "inflate: fixed codes block%s\n",
state->last ? " (last)" : ""));
state->mode = LEN_; /* decode codes */
@ -878,8 +762,8 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
state->last ? " (last)" : ""));
state->mode = TABLE;
break;
case 3:
strm->msg = (char *)"invalid block type";
default:
strm->msg = (z_const char *)"invalid block type";
state->mode = BAD;
}
DROPBITS(2);
@ -888,7 +772,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
BYTEBITS(); /* go to byte boundary */
NEEDBITS(32);
if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) {
strm->msg = (char *)"invalid stored block lengths";
strm->msg = (z_const char *)"invalid stored block lengths";
state->mode = BAD;
break;
}
@ -929,7 +813,8 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
DROPBITS(4);
#ifndef PKZIP_BUG_WORKAROUND
if (state->nlen > 286 || state->ndist > 30) {
strm->msg = (char *)"too many length or distance symbols";
strm->msg = (z_const char *)
"too many length or distance symbols";
state->mode = BAD;
break;
}
@ -947,12 +832,12 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
while (state->have < 19)
state->lens[order[state->have++]] = 0;
state->next = state->codes;
state->lencode = (const code FAR *)(state->next);
state->lencode = state->distcode = (const code FAR *)(state->next);
state->lenbits = 7;
ret = inflate_table(CODES, state->lens, 19, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid code lengths set";
strm->msg = (z_const char *)"invalid code lengths set";
state->mode = BAD;
break;
}
@ -976,7 +861,8 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
NEEDBITS(here.bits + 2);
DROPBITS(here.bits);
if (state->have == 0) {
strm->msg = (char *)"invalid bit length repeat";
strm->msg = (z_const char *)
"invalid bit length repeat";
state->mode = BAD;
break;
}
@ -999,7 +885,8 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
DROPBITS(7);
}
if (state->have + copy > state->nlen + state->ndist) {
strm->msg = (char *)"invalid bit length repeat";
strm->msg = (z_const char *)
"invalid bit length repeat";
state->mode = BAD;
break;
}
@ -1013,7 +900,8 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
/* check for end-of-block code (better have one) */
if (state->lens[256] == 0) {
strm->msg = (char *)"invalid code -- missing end-of-block";
strm->msg = (z_const char *)
"invalid code -- missing end-of-block";
state->mode = BAD;
break;
}
@ -1027,7 +915,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
ret = inflate_table(LENS, state->lens, state->nlen, &(state->next),
&(state->lenbits), state->work);
if (ret) {
strm->msg = (char *)"invalid literal/lengths set";
strm->msg = (z_const char *)"invalid literal/lengths set";
state->mode = BAD;
break;
}
@ -1036,7 +924,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist,
&(state->next), &(state->distbits), state->work);
if (ret) {
strm->msg = (char *)"invalid distances set";
strm->msg = (z_const char *)"invalid distances set";
state->mode = BAD;
break;
}
@ -1090,7 +978,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
break;
}
if (here.op & 64) {
strm->msg = (char *)"invalid literal/length code";
strm->msg = (z_const char *)"invalid literal/length code";
state->mode = BAD;
break;
}
@ -1128,7 +1016,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
DROPBITS(here.bits);
state->back += here.bits;
if (here.op & 64) {
strm->msg = (char *)"invalid distance code";
strm->msg = (z_const char *)"invalid distance code";
state->mode = BAD;
break;
}
@ -1145,7 +1033,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
}
#ifdef INFLATE_STRICT
if (state->offset > state->dmax) {
strm->msg = (char *)"invalid distance too far back";
strm->msg = (z_const char *)"invalid distance too far back";
state->mode = BAD;
break;
}
@ -1160,7 +1048,8 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
copy = state->offset - copy;
if (copy > state->whave) {
if (state->sane) {
strm->msg = (char *)"invalid distance too far back";
strm->msg = (z_const char *)
"invalid distance too far back";
state->mode = BAD;
break;
}
@ -1219,7 +1108,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
state->flags ? hold :
#endif
ZSWAP32(hold)) != state->check) {
strm->msg = (char *)"incorrect data check";
strm->msg = (z_const char *)"incorrect data check";
state->mode = BAD;
break;
}
@ -1233,7 +1122,7 @@ int ZEXPORT inflate(z_streamp strm, int flush) {
if (state->wrap && state->flags) {
NEEDBITS(32);
if ((state->wrap & 4) && hold != (state->total & 0xffffffff)) {
strm->msg = (char *)"incorrect length check";
strm->msg = (z_const char *)"incorrect length check";
state->mode = BAD;
break;
}
@ -1464,7 +1353,6 @@ int ZEXPORT inflateCopy(z_streamp dest, z_streamp source) {
struct inflate_state FAR *state;
struct inflate_state FAR *copy;
unsigned char FAR *window;
unsigned wsize;
/* check input */
if (inflateStateCheck(source) || dest == Z_NULL)
@ -1475,6 +1363,7 @@ int ZEXPORT inflateCopy(z_streamp dest, z_streamp source) {
copy = (struct inflate_state FAR *)
ZALLOC(source, 1, sizeof(struct inflate_state));
if (copy == Z_NULL) return Z_MEM_ERROR;
zmemzero(copy, sizeof(struct inflate_state));
window = Z_NULL;
if (state->window != Z_NULL) {
window = (unsigned char FAR *)
@ -1486,8 +1375,8 @@ int ZEXPORT inflateCopy(z_streamp dest, z_streamp source) {
}
/* copy state */
zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream));
zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state));
zmemcpy(dest, source, sizeof(z_stream));
zmemcpy(copy, state, sizeof(struct inflate_state));
copy->strm = dest;
if (state->lencode >= state->codes &&
state->lencode <= state->codes + ENOUGH - 1) {
@ -1495,10 +1384,8 @@ int ZEXPORT inflateCopy(z_streamp dest, z_streamp source) {
copy->distcode = copy->codes + (state->distcode - state->codes);
}
copy->next = copy->codes + (state->next - state->codes);
if (window != Z_NULL) {
wsize = 1U << state->wbits;
zmemcpy(window, state->window, wsize);
}
if (window != Z_NULL)
zmemcpy(window, state->window, state->whave);
copy->window = window;
dest->state = (struct internal_state FAR *)copy;
return Z_OK;

View File

@ -124,7 +124,7 @@ struct inflate_state {
unsigned char FAR *window; /* allocated sliding window, if needed */
/* bit accumulator */
unsigned long hold; /* input bit accumulator */
unsigned bits; /* number of bits in "in" */
unsigned bits; /* number of bits in hold */
/* for string and stored block copying */
unsigned length; /* literal or length of data to copy */
unsigned offset; /* distance back to copy string from */

View File

@ -23,17 +23,31 @@
*/
/* inftrees.c -- generate Huffman trees for efficient decoding
* Copyright (C) 1995-2024 Mark Adler
* Copyright (C) 1995-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#ifdef MAKEFIXED
# ifndef BUILDFIXED
# define BUILDFIXED
# endif
#endif
#ifdef BUILDFIXED
# define Z_ONCE
#endif
#include "zutil.h"
#include "inftrees.h"
#include "inflate.h"
#ifndef NULL
# define NULL 0
#endif
#define MAXBITS 15
const char inflate_copyright[] =
" inflate 1.3.1 Copyright 1995-2024 Mark Adler ";
" inflate 1.3.2 Copyright 1995-2026 Mark Adler ";
/*
If you use the zlib library in a product, an acknowledgment is welcome
in the documentation of your product. If for some reason you cannot
@ -71,9 +85,9 @@ int ZLIB_INTERNAL inflate_table(codetype type, unsigned short FAR *lens,
unsigned mask; /* mask for low root bits */
code here; /* table entry for duplication */
code FAR *next; /* next available space in table */
const unsigned short FAR *base; /* base value table to use */
const unsigned short FAR *extra; /* extra bits table to use */
unsigned match; /* use base and extra for symbol >= match */
const unsigned short FAR *base = NULL; /* base value table to use */
const unsigned short FAR *extra = NULL; /* extra bits table to use */
unsigned match = 0; /* use base and extra for symbol >= match */
unsigned short count[MAXBITS+1]; /* number of codes of each length */
unsigned short offs[MAXBITS+1]; /* offsets in table for each length */
static const unsigned short lbase[31] = { /* Length codes 257..285 base */
@ -81,7 +95,7 @@ int ZLIB_INTERNAL inflate_table(codetype type, unsigned short FAR *lens,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0};
static const unsigned short lext[31] = { /* Length codes 257..285 extra */
16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 203, 77};
19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 199, 75};
static const unsigned short dbase[32] = { /* Distance codes 0..29 base */
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
@ -199,7 +213,6 @@ int ZLIB_INTERNAL inflate_table(codetype type, unsigned short FAR *lens,
/* set up for code type */
switch (type) {
case CODES:
base = extra = work; /* dummy value--not used */
match = 20;
break;
case LENS:
@ -207,10 +220,9 @@ int ZLIB_INTERNAL inflate_table(codetype type, unsigned short FAR *lens,
extra = lext;
match = 257;
break;
default: /* DISTS */
case DISTS:
base = dbase;
extra = dext;
match = 0;
}
/* initialize state for loop */
@ -321,3 +333,116 @@ int ZLIB_INTERNAL inflate_table(codetype type, unsigned short FAR *lens,
*bits = root;
return 0;
}
#ifdef BUILDFIXED
/*
If this is compiled with BUILDFIXED defined, and if inflate will be used in
multiple threads, and if atomics are not available, then inflate() must be
called with a fixed block (e.g. 0x03 0x00) to initialize the tables and must
return before any other threads are allowed to call inflate.
*/
static code *lenfix, *distfix;
static code fixed[544];
/* State for z_once(). */
local z_once_t built = Z_ONCE_INIT;
local void buildtables(void) {
unsigned sym, bits;
static code *next;
unsigned short lens[288], work[288];
/* literal/length table */
sym = 0;
while (sym < 144) lens[sym++] = 8;
while (sym < 256) lens[sym++] = 9;
while (sym < 280) lens[sym++] = 7;
while (sym < 288) lens[sym++] = 8;
next = fixed;
lenfix = next;
bits = 9;
inflate_table(LENS, lens, 288, &(next), &(bits), work);
/* distance table */
sym = 0;
while (sym < 32) lens[sym++] = 5;
distfix = next;
bits = 5;
inflate_table(DISTS, lens, 32, &(next), &(bits), work);
}
#else /* !BUILDFIXED */
# include "inffixed.h"
#endif /* BUILDFIXED */
/*
Return state with length and distance decoding tables and index sizes set to
fixed code decoding. Normally this returns fixed tables from inffixed.h.
If BUILDFIXED is defined, then instead this routine builds the tables the
first time it's called, and returns those tables the first time and
thereafter. This reduces the size of the code by about 2K bytes, in
exchange for a little execution time. However, BUILDFIXED should not be
used for threaded applications if atomics are not available, as it will
not be thread-safe.
*/
void inflate_fixed(struct inflate_state FAR *state) {
#ifdef BUILDFIXED
z_once(&built, buildtables);
#endif /* BUILDFIXED */
state->lencode = lenfix;
state->lenbits = 9;
state->distcode = distfix;
state->distbits = 5;
}
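
Where BUILDFIXED is in effect but no once/atomic primitive is available, the comment above requires decoding one fixed block from a single thread before any concurrent inflate() calls. A minimal sketch of that priming step, using the empty fixed-code block (0x03 0x00) mentioned above on a raw stream:

#include "zlib.h"

/* Build the fixed tables once, before spawning inflate threads (sketch). */
int prime_fixed_tables(void) {
    static const Bytef block[2] = { 0x03, 0x00 };  /* final, fixed, empty */
    Bytef out[1];
    z_stream strm;
    int ret;

    strm.zalloc = Z_NULL;
    strm.zfree = Z_NULL;
    strm.opaque = Z_NULL;
    strm.next_in = (z_const Bytef *)block;
    strm.avail_in = 2;
    if (inflateInit2(&strm, -15) != Z_OK)          /* raw deflate data */
        return -1;
    strm.next_out = out;
    strm.avail_out = sizeof(out);
    ret = inflate(&strm, Z_FINISH);                /* builds lenfix/distfix */
    inflateEnd(&strm);
    return ret == Z_STREAM_END ? 0 : -1;
}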
#ifdef MAKEFIXED
#include <stdio.h>
/*
Write out the inffixed.h that will be #include'd above. Defining MAKEFIXED
also defines BUILDFIXED, so the tables are built on the fly. main() writes
those tables to stdout, which would be directed to inffixed.h. Compile this
along with zutil.c:
cc -DMAKEFIXED -o fix inftrees.c zutil.c
./fix > inffixed.h
*/
int main(void) {
unsigned low, size;
struct inflate_state state;
inflate_fixed(&state);
puts("/* inffixed.h -- table for decoding fixed codes");
puts(" * Generated automatically by makefixed().");
puts(" */");
puts("");
puts("/* WARNING: this file should *not* be used by applications.");
puts(" It is part of the implementation of this library and is");
puts(" subject to change. Applications should only use zlib.h.");
puts(" */");
puts("");
size = 1U << 9;
printf("static const code lenfix[%u] = {", size);
low = 0;
for (;;) {
if ((low % 7) == 0) printf("\n ");
printf("{%u,%u,%d}", (low & 127) == 99 ? 64 : state.lencode[low].op,
state.lencode[low].bits, state.lencode[low].val);
if (++low == size) break;
putchar(',');
}
puts("\n};");
size = 1U << 5;
printf("\nstatic const code distfix[%u] = {", size);
low = 0;
for (;;) {
if ((low % 6) == 0) printf("\n ");
printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits,
state.distcode[low].val);
if (++low == size) break;
putchar(',');
}
puts("\n};");
return 0;
}
#endif /* MAKEFIXED */

View File

@ -23,7 +23,7 @@
*/
/* inftrees.h -- header to use inftrees.c
* Copyright (C) 1995-2005, 2010 Mark Adler
* Copyright (C) 1995-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -84,3 +84,5 @@ typedef enum {
int ZLIB_INTERNAL inflate_table(codetype type, unsigned short FAR *lens,
unsigned codes, code FAR * FAR *table,
unsigned FAR *bits, unsigned short FAR *work);
struct inflate_state;
void ZLIB_INTERNAL inflate_fixed(struct inflate_state FAR *state);

View File

@ -1,4 +1,4 @@
Changes from zlib 1.3.1
Changes in JDK's in-tree zlib compared to upstream zlib 1.3.2
(1) renamed adler32.c -> zadler32.c, crc32c -> zcrc32.c

View File

@ -23,7 +23,7 @@
*/
/* trees.c -- output deflated data using Huffman coding
* Copyright (C) 1995-2024 Jean-loup Gailly
* Copyright (C) 1995-2026 Jean-loup Gailly
* detect_data_type() function provided freely by Cosmin Truta, 2006
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -136,7 +136,7 @@ local int base_dist[D_CODES];
#else
# include "trees.h"
#endif /* GEN_TREES_H */
#endif /* defined(GEN_TREES_H) || !defined(STDC) */
struct static_tree_desc_s {
const ct_data *static_tree; /* static tree or NULL */
@ -176,7 +176,7 @@ local TCONST static_tree_desc static_bl_desc =
* IN assertion: 1 <= len <= 15
*/
local unsigned bi_reverse(unsigned code, int len) {
register unsigned res = 0;
unsigned res = 0;
do {
res |= code & 1;
code >>= 1, res <<= 1;
@ -208,10 +208,11 @@ local void bi_windup(deflate_state *s) {
} else if (s->bi_valid > 0) {
put_byte(s, (Byte)s->bi_buf);
}
s->bi_used = ((s->bi_valid - 1) & 7) + 1;
s->bi_buf = 0;
s->bi_valid = 0;
#ifdef ZLIB_DEBUG
s->bits_sent = (s->bits_sent + 7) & ~7;
s->bits_sent = (s->bits_sent + 7) & ~(ulg)7;
#endif
}
@ -490,6 +491,7 @@ void ZLIB_INTERNAL _tr_init(deflate_state *s) {
s->bi_buf = 0;
s->bi_valid = 0;
s->bi_used = 0;
#ifdef ZLIB_DEBUG
s->compressed_len = 0L;
s->bits_sent = 0L;
@ -748,7 +750,7 @@ local void scan_tree(deflate_state *s, ct_data *tree, int max_code) {
if (++count < max_count && curlen == nextlen) {
continue;
} else if (count < min_count) {
s->bl_tree[curlen].Freq += count;
s->bl_tree[curlen].Freq += (ush)count;
} else if (curlen != 0) {
if (curlen != prevlen) s->bl_tree[curlen].Freq++;
s->bl_tree[REP_3_6].Freq++;
@ -841,7 +843,7 @@ local int build_bl_tree(deflate_state *s) {
}
/* Update opt_len to include the bit length tree and counts */
s->opt_len += 3*((ulg)max_blindex + 1) + 5 + 5 + 4;
Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
Tracev((stderr, "\ndyn trees: dyn %lu, stat %lu",
s->opt_len, s->static_len));
return max_blindex;
@ -867,13 +869,13 @@ local void send_all_trees(deflate_state *s, int lcodes, int dcodes,
Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
send_bits(s, s->bl_tree[bl_order[rank]].Len, 3);
}
Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
Tracev((stderr, "\nbl tree: sent %lu", s->bits_sent));
send_tree(s, (ct_data *)s->dyn_ltree, lcodes - 1); /* literal tree */
Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
Tracev((stderr, "\nlit tree: sent %lu", s->bits_sent));
send_tree(s, (ct_data *)s->dyn_dtree, dcodes - 1); /* distance tree */
Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
Tracev((stderr, "\ndist tree: sent %lu", s->bits_sent));
}
/* ===========================================================================
@ -956,7 +958,7 @@ local void compress_block(deflate_state *s, const ct_data *ltree,
extra = extra_dbits[code];
if (extra != 0) {
dist -= (unsigned)base_dist[code];
send_bits(s, dist, extra); /* send the extra distance bits */
send_bits(s, (int)dist, extra); /* send the extra bits */
}
} /* literal or match pair ? */
@ -1030,11 +1032,11 @@ void ZLIB_INTERNAL _tr_flush_block(deflate_state *s, charf *buf,
/* Construct the literal and distance trees */
build_tree(s, (tree_desc *)(&(s->l_desc)));
Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
Tracev((stderr, "\nlit data: dyn %lu, stat %lu", s->opt_len,
s->static_len));
build_tree(s, (tree_desc *)(&(s->d_desc)));
Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
Tracev((stderr, "\ndist data: dyn %lu, stat %lu", s->opt_len,
s->static_len));
/* At this point, opt_len and static_len are the total bit lengths of
* the compressed block data, excluding the tree representations.
@ -1107,7 +1109,7 @@ void ZLIB_INTERNAL _tr_flush_block(deflate_state *s, charf *buf,
#endif
}
Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len >> 3,
s->compressed_len - 7*last));
s->compressed_len - 7*(ulg)last));
}
/* ===========================================================================

View File

@ -23,7 +23,7 @@
*/
/* uncompr.c -- decompress a memory buffer
* Copyright (C) 1995-2003, 2010, 2014, 2016 Jean-loup Gailly, Mark Adler
* Copyright (C) 1995-2026 Jean-loup Gailly, Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -47,24 +47,24 @@
memory, Z_BUF_ERROR if there was not enough room in the output buffer, or
Z_DATA_ERROR if the input data was corrupted, including if the input data is
an incomplete zlib stream.
The _z versions of the functions take size_t length arguments.
*/
int ZEXPORT uncompress2(Bytef *dest, uLongf *destLen, const Bytef *source,
uLong *sourceLen) {
int ZEXPORT uncompress2_z(Bytef *dest, z_size_t *destLen, const Bytef *source,
z_size_t *sourceLen) {
z_stream stream;
int err;
const uInt max = (uInt)-1;
uLong len, left;
Byte buf[1]; /* for detection of incomplete stream when *destLen == 0 */
z_size_t len, left;
if (sourceLen == NULL || (*sourceLen > 0 && source == NULL) ||
destLen == NULL || (*destLen > 0 && dest == NULL))
return Z_STREAM_ERROR;
len = *sourceLen;
if (*destLen) {
left = *destLen;
*destLen = 0;
}
else {
left = 1;
dest = buf;
}
left = *destLen;
if (left == 0 && dest == Z_NULL)
dest = (Bytef *)&stream.reserved; /* next_out cannot be NULL */
stream.next_in = (z_const Bytef *)source;
stream.avail_in = 0;
@ -80,30 +80,46 @@ int ZEXPORT uncompress2(Bytef *dest, uLongf *destLen, const Bytef *source,
do {
if (stream.avail_out == 0) {
stream.avail_out = left > (uLong)max ? max : (uInt)left;
stream.avail_out = left > (z_size_t)max ? max : (uInt)left;
left -= stream.avail_out;
}
if (stream.avail_in == 0) {
stream.avail_in = len > (uLong)max ? max : (uInt)len;
stream.avail_in = len > (z_size_t)max ? max : (uInt)len;
len -= stream.avail_in;
}
err = inflate(&stream, Z_NO_FLUSH);
} while (err == Z_OK);
*sourceLen -= len + stream.avail_in;
if (dest != buf)
*destLen = stream.total_out;
else if (stream.total_out && err == Z_BUF_ERROR)
left = 1;
/* Set len and left to the unused input data and unused output space. Set
*sourceLen to the amount of input consumed. Set *destLen to the amount
of data produced. */
len += stream.avail_in;
left += stream.avail_out;
*sourceLen -= len;
*destLen -= left;
inflateEnd(&stream);
return err == Z_STREAM_END ? Z_OK :
err == Z_NEED_DICT ? Z_DATA_ERROR :
err == Z_BUF_ERROR && left + stream.avail_out ? Z_DATA_ERROR :
err == Z_BUF_ERROR && len == 0 ? Z_DATA_ERROR :
err;
}
int ZEXPORT uncompress2(Bytef *dest, uLongf *destLen, const Bytef *source,
uLong *sourceLen) {
int ret;
z_size_t got = *destLen, used = *sourceLen;
ret = uncompress2_z(dest, &got, source, &used);
*sourceLen = (uLong)used;
*destLen = (uLong)got;
return ret;
}
int ZEXPORT uncompress_z(Bytef *dest, z_size_t *destLen, const Bytef *source,
z_size_t sourceLen) {
z_size_t used = sourceLen;
return uncompress2_z(dest, destLen, source, &used);
}
int ZEXPORT uncompress(Bytef *dest, uLongf *destLen, const Bytef *source,
uLong sourceLen) {
return uncompress2(dest, destLen, source, &sourceLen);
uLong used = sourceLen;
return uncompress2(dest, destLen, source, &used);
}
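
For context, a minimal sketch of calling the new size_t-based entry point added above. The function name demo_uncompress_z is hypothetical; the input bytes are a valid zlib stream that decodes to "hello".

    #include <stdio.h>
    #include "zlib.h"

    /* Hypothetical demo: decompress with the new size_t-based API. */
    int demo_uncompress_z(void) {
        static const Bytef src[] = {
            0x78, 0x9c, 0xcb, 0x48, 0xcd, 0xc9, 0xc9, 0x07,
            0x00, 0x06, 0x2c, 0x02, 0x15   /* zlib stream for "hello" */
        };
        Bytef out[64];
        z_size_t outLen = sizeof(out);
        z_size_t srcLen = sizeof(src);
        int ret = uncompress2_z(out, &outLen, src, &srcLen);
        if (ret != Z_OK)
            return ret;
        printf("produced %lu bytes, consumed %lu bytes\n",
               (unsigned long)outLen, (unsigned long)srcLen);
        return Z_OK;
    }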

View File

@ -23,7 +23,7 @@
*/
/* zconf.h -- configuration of the zlib compression library
* Copyright (C) 1995-2024 Jean-loup Gailly, Mark Adler
* Copyright (C) 1995-2026 Jean-loup Gailly, Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -57,7 +57,10 @@
# ifndef Z_SOLO
# define compress z_compress
# define compress2 z_compress2
# define compress_z z_compress_z
# define compress2_z z_compress2_z
# define compressBound z_compressBound
# define compressBound_z z_compressBound_z
# endif
# define crc32 z_crc32
# define crc32_combine z_crc32_combine
@ -68,6 +71,7 @@
# define crc32_z z_crc32_z
# define deflate z_deflate
# define deflateBound z_deflateBound
# define deflateBound_z z_deflateBound_z
# define deflateCopy z_deflateCopy
# define deflateEnd z_deflateEnd
# define deflateGetDictionary z_deflateGetDictionary
@ -83,6 +87,7 @@
# define deflateSetDictionary z_deflateSetDictionary
# define deflateSetHeader z_deflateSetHeader
# define deflateTune z_deflateTune
# define deflateUsed z_deflateUsed
# define deflate_copyright z_deflate_copyright
# define get_crc_table z_get_crc_table
# ifndef Z_SOLO
@ -152,9 +157,12 @@
# define inflate_copyright z_inflate_copyright
# define inflate_fast z_inflate_fast
# define inflate_table z_inflate_table
# define inflate_fixed z_inflate_fixed
# ifndef Z_SOLO
# define uncompress z_uncompress
# define uncompress2 z_uncompress2
# define uncompress_z z_uncompress_z
# define uncompress2_z z_uncompress2_z
# endif
# define zError z_zError
# ifndef Z_SOLO
@ -258,10 +266,12 @@
# endif
#endif
#if defined(ZLIB_CONST) && !defined(z_const)
# define z_const const
#else
# define z_const
#ifndef z_const
# ifdef ZLIB_CONST
# define z_const const
# else
# define z_const
# endif
#endif
#ifdef Z_SOLO
@ -457,11 +467,11 @@ typedef uLong FAR uLongf;
typedef unsigned long z_crc_t;
#endif
#ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */
#if HAVE_UNISTD_H-0 /* may be set to #if 1 by ./configure */
# define Z_HAVE_UNISTD_H
#endif
#ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */
#if HAVE_STDARG_H-0 /* may be set to #if 1 by ./configure */
# define Z_HAVE_STDARG_H
#endif
@ -494,12 +504,8 @@ typedef uLong FAR uLongf;
#endif
#ifndef Z_HAVE_UNISTD_H
# ifdef __WATCOMC__
# define Z_HAVE_UNISTD_H
# endif
#endif
#ifndef Z_HAVE_UNISTD_H
# if defined(_LARGEFILE64_SOURCE) && !defined(_WIN32)
# if defined(__WATCOMC__) || defined(__GO32__) || \
(defined(_LARGEFILE64_SOURCE) && !defined(_WIN32))
# define Z_HAVE_UNISTD_H
# endif
#endif
@ -534,17 +540,19 @@ typedef uLong FAR uLongf;
#endif
#ifndef z_off_t
# define z_off_t long
# define z_off_t long long
#endif
#if !defined(_WIN32) && defined(Z_LARGE64)
# define z_off64_t off64_t
#elif defined(__MINGW32__)
# define z_off64_t long long
#elif defined(_WIN32) && !defined(__GNUC__)
# define z_off64_t __int64
#elif defined(__GO32__)
# define z_off64_t offset_t
#else
# if defined(_WIN32) && !defined(__GNUC__)
# define z_off64_t __int64
# else
# define z_off64_t z_off_t
# endif
# define z_off64_t z_off_t
#endif
/* MVS linker does not support external names larger than 8 bytes */

View File

@ -23,7 +23,7 @@
*/
/* crc32.c -- compute the CRC-32 of a data stream
* Copyright (C) 1995-2022 Mark Adler
* Copyright (C) 1995-2026 Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*
* This interleaved implementation of a CRC makes use of pipelined multiple
@ -48,11 +48,18 @@
# include <stdio.h>
# ifndef DYNAMIC_CRC_TABLE
# define DYNAMIC_CRC_TABLE
# endif /* !DYNAMIC_CRC_TABLE */
#endif /* MAKECRCH */
# endif
#endif
#ifdef DYNAMIC_CRC_TABLE
# define Z_ONCE
#endif
#include "zutil.h" /* for Z_U4, Z_U8, z_crc_t, and FAR definitions */
#ifdef HAVE_S390X_VX
# include "contrib/crc32vx/crc32_vx_hooks.h"
#endif
/*
A CRC of a message is computed on N braids of words in the message, where
each word consists of W bytes (4 or 8). If N is 3, for example, then three
@ -123,7 +130,8 @@
#endif
/* If available, use the ARM processor CRC32 instruction. */
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) && W == 8
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32) && \
defined(W) && W == 8
# define ARMCRC32
#endif
@ -176,10 +184,10 @@ local z_word_t byte_swap(z_word_t word) {
Return a(x) multiplied by b(x) modulo p(x), where p(x) is the CRC polynomial,
reflected. For speed, this requires that a not be zero.
*/
local z_crc_t multmodp(z_crc_t a, z_crc_t b) {
z_crc_t m, p;
local uLong multmodp(uLong a, uLong b) {
uLong m, p;
m = (z_crc_t)1 << 31;
m = (uLong)1 << 31;
p = 0;
for (;;) {
if (a & m) {
@ -195,12 +203,12 @@ local z_crc_t multmodp(z_crc_t a, z_crc_t b) {
/*
Return x^(n * 2^k) modulo p(x). Requires that x2n_table[] has been
initialized.
initialized. n must not be negative.
*/
local z_crc_t x2nmodp(z_off64_t n, unsigned k) {
z_crc_t p;
local uLong x2nmodp(z_off64_t n, unsigned k) {
uLong p;
p = (z_crc_t)1 << 31; /* x^0 == 1 */
p = (uLong)1 << 31; /* x^0 == 1 */
while (n) {
if (n & 1)
p = multmodp(x2n_table[k & 31], p);
@ -228,83 +236,8 @@ local z_crc_t FAR crc_table[256];
local void write_table64(FILE *, const z_word_t FAR *, int);
#endif /* MAKECRCH */
/*
Define a once() function depending on the availability of atomics. If this is
compiled with DYNAMIC_CRC_TABLE defined, and if CRCs will be computed in
multiple threads, and if atomics are not available, then get_crc_table() must
be called to initialize the tables and must return before any threads are
allowed to compute or combine CRCs.
*/
/* Definition of once functionality. */
typedef struct once_s once_t;
/* Check for the availability of atomics. */
#if defined(__STDC__) && __STDC_VERSION__ >= 201112L && \
!defined(__STDC_NO_ATOMICS__)
#include <stdatomic.h>
/* Structure for once(), which must be initialized with ONCE_INIT. */
struct once_s {
atomic_flag begun;
atomic_int done;
};
#define ONCE_INIT {ATOMIC_FLAG_INIT, 0}
/*
Run the provided init() function exactly once, even if multiple threads
invoke once() at the same time. The state must be a once_t initialized with
ONCE_INIT.
*/
local void once(once_t *state, void (*init)(void)) {
if (!atomic_load(&state->done)) {
if (atomic_flag_test_and_set(&state->begun))
while (!atomic_load(&state->done))
;
else {
init();
atomic_store(&state->done, 1);
}
}
}
#else /* no atomics */
/* Structure for once(), which must be initialized with ONCE_INIT. */
struct once_s {
volatile int begun;
volatile int done;
};
#define ONCE_INIT {0, 0}
/* Test and set. Alas, not atomic, but tries to minimize the period of
vulnerability. */
local int test_and_set(int volatile *flag) {
int was;
was = *flag;
*flag = 1;
return was;
}
/* Run the provided init() function once. This is not thread-safe. */
local void once(once_t *state, void (*init)(void)) {
if (!state->done) {
if (test_and_set(&state->begun))
while (!state->done)
;
else {
init();
state->done = 1;
}
}
}
#endif
/* State for once(). */
local once_t made = ONCE_INIT;
local z_once_t made = Z_ONCE_INIT;
/*
Generate tables for a byte-wise 32-bit CRC calculation on the polynomial:
@ -350,7 +283,7 @@ local void make_crc_table(void) {
p = (z_crc_t)1 << 30; /* x^1 */
x2n_table[0] = p;
for (n = 1; n < 32; n++)
x2n_table[n] = p = multmodp(p, p);
x2n_table[n] = p = (z_crc_t)multmodp(p, p);
#ifdef W
/* initialize the braiding tables -- needs x2n_table[] */
@ -553,11 +486,11 @@ local void braid(z_crc_t ltl[][256], z_word_t big[][256], int n, int w) {
int k;
z_crc_t i, p, q;
for (k = 0; k < w; k++) {
p = x2nmodp((n * w + 3 - k) << 3, 0);
p = (z_crc_t)x2nmodp((n * w + 3 - k) << 3, 0);
ltl[k][0] = 0;
big[w - 1 - k][0] = 0;
for (i = 1; i < 256; i++) {
ltl[k][i] = q = multmodp(i << 24, p);
ltl[k][i] = q = (z_crc_t)multmodp(i << 24, p);
big[w - 1 - k][i] = byte_swap(q);
}
}
@ -572,7 +505,7 @@ local void braid(z_crc_t ltl[][256], z_word_t big[][256], int n, int w) {
*/
const z_crc_t FAR * ZEXPORT get_crc_table(void) {
#ifdef DYNAMIC_CRC_TABLE
once(&made, make_crc_table);
z_once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */
return (const z_crc_t FAR *)crc_table;
}
@ -596,9 +529,8 @@ const z_crc_t FAR * ZEXPORT get_crc_table(void) {
#define Z_BATCH_ZEROS 0xa10d3d0c /* computed from Z_BATCH = 3990 */
#define Z_BATCH_MIN 800 /* fewest words in a final batch */
unsigned long ZEXPORT crc32_z(unsigned long crc, const unsigned char FAR *buf,
z_size_t len) {
z_crc_t val;
uLong ZEXPORT crc32_z(uLong crc, const unsigned char FAR *buf, z_size_t len) {
uLong val;
z_word_t crc1, crc2;
const z_word_t *word;
z_word_t val0, val1, val2;
@ -609,7 +541,7 @@ unsigned long ZEXPORT crc32_z(unsigned long crc, const unsigned char FAR *buf,
if (buf == Z_NULL) return 0;
#ifdef DYNAMIC_CRC_TABLE
once(&made, make_crc_table);
z_once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */
/* Pre-condition the CRC */
@ -664,7 +596,7 @@ unsigned long ZEXPORT crc32_z(unsigned long crc, const unsigned char FAR *buf,
}
word += 3 * last;
num -= 3 * last;
val = x2nmodp(last, 6);
val = x2nmodp((int)last, 6);
crc = multmodp(val, crc) ^ crc1;
crc = multmodp(val, crc) ^ crc2;
}
@ -715,13 +647,12 @@ local z_word_t crc_word_big(z_word_t data) {
#endif
/* ========================================================================= */
unsigned long ZEXPORT crc32_z(unsigned long crc, const unsigned char FAR *buf,
z_size_t len) {
uLong ZEXPORT crc32_z(uLong crc, const unsigned char FAR *buf, z_size_t len) {
/* Return initial CRC, if requested. */
if (buf == Z_NULL) return 0;
#ifdef DYNAMIC_CRC_TABLE
once(&made, make_crc_table);
z_once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */
/* Pre-condition the CRC */
@ -1036,28 +967,19 @@ unsigned long ZEXPORT crc32_z(unsigned long crc, const unsigned char FAR *buf,
#endif
/* ========================================================================= */
unsigned long ZEXPORT crc32(unsigned long crc, const unsigned char FAR *buf,
uInt len) {
uLong ZEXPORT crc32(uLong crc, const unsigned char FAR *buf, uInt len) {
#ifdef HAVE_S390X_VX
return crc32_z_hook(crc, buf, len);
#endif
return crc32_z(crc, buf, len);
}
/* ========================================================================= */
uLong ZEXPORT crc32_combine64(uLong crc1, uLong crc2, z_off64_t len2) {
#ifdef DYNAMIC_CRC_TABLE
once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */
return multmodp(x2nmodp(len2, 3), crc1) ^ (crc2 & 0xffffffff);
}
/* ========================================================================= */
uLong ZEXPORT crc32_combine(uLong crc1, uLong crc2, z_off_t len2) {
return crc32_combine64(crc1, crc2, (z_off64_t)len2);
}
/* ========================================================================= */
uLong ZEXPORT crc32_combine_gen64(z_off64_t len2) {
if (len2 < 0)
return 0;
#ifdef DYNAMIC_CRC_TABLE
once(&made, make_crc_table);
z_once(&made, make_crc_table);
#endif /* DYNAMIC_CRC_TABLE */
return x2nmodp(len2, 3);
}
@ -1069,5 +991,17 @@ uLong ZEXPORT crc32_combine_gen(z_off_t len2) {
/* ========================================================================= */
uLong ZEXPORT crc32_combine_op(uLong crc1, uLong crc2, uLong op) {
return multmodp(op, crc1) ^ (crc2 & 0xffffffff);
if (op == 0)
return 0;
return multmodp(op, crc1 & 0xffffffff) ^ (crc2 & 0xffffffff);
}
/* ========================================================================= */
uLong ZEXPORT crc32_combine64(uLong crc1, uLong crc2, z_off64_t len2) {
return crc32_combine_op(crc1, crc2, crc32_combine_gen64(len2));
}
/* ========================================================================= */
uLong ZEXPORT crc32_combine(uLong crc1, uLong crc2, z_off_t len2) {
return crc32_combine64(crc1, crc2, (z_off64_t)len2);
}
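
For context, the refactor above makes crc32_combine64() a thin wrapper over crc32_combine_gen64() and crc32_combine_op(). A minimal sketch of combining two independently computed CRCs; the function name and buffer contents are hypothetical.

    #include <string.h>
    #include "zlib.h"

    /* Check that combining per-buffer CRCs matches the CRC of the whole. */
    int crc_combine_demo(void) {
        const unsigned char a[] = "hello", b[] = "world";
        unsigned char ab[10];
        uLong crc1 = crc32(crc32(0L, Z_NULL, 0), a, 5);
        uLong crc2 = crc32(crc32(0L, Z_NULL, 0), b, 5);
        uLong op   = crc32_combine_gen(5);       /* operator for len2 == 5 */
        uLong both = crc32_combine_op(crc1, crc2, op);
        memcpy(ab, a, 5);
        memcpy(ab + 5, b, 5);
        return both == crc32(crc32(0L, Z_NULL, 0), ab, 10);  /* 1 if equal */
    }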

View File

@ -23,9 +23,9 @@
*/
/* zlib.h -- interface of the 'zlib' general purpose compression library
version 1.3.1, January 22nd, 2024
version 1.3.2, February 17th, 2026
Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler
Copyright (C) 1995-2026 Jean-loup Gailly and Mark Adler
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
@ -48,24 +48,28 @@
The data format used by the zlib library is described by RFCs (Request for
Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950
Comments) 1950 to 1952 at https://datatracker.ietf.org/doc/html/rfc1950
(zlib format), rfc1951 (deflate format) and rfc1952 (gzip format).
*/
#ifndef ZLIB_H
#define ZLIB_H
#include "zconf.h"
#ifdef ZLIB_BUILD
# include <zconf.h>
#else
# include "zconf.h"
#endif
#ifdef __cplusplus
extern "C" {
#endif
#define ZLIB_VERSION "1.3.1"
#define ZLIB_VERNUM 0x1310
#define ZLIB_VERSION "1.3.2"
#define ZLIB_VERNUM 0x1320
#define ZLIB_VER_MAJOR 1
#define ZLIB_VER_MINOR 3
#define ZLIB_VER_REVISION 1
#define ZLIB_VER_REVISION 2
#define ZLIB_VER_SUBREVISION 0
/*
@ -465,7 +469,7 @@ ZEXTERN int ZEXPORT inflate(z_streamp strm, int flush);
The Z_BLOCK option assists in appending to or combining deflate streams.
To assist in this, on return inflate() always sets strm->data_type to the
number of unused bits in the last byte taken from strm->next_in, plus 64 if
number of unused bits in the input taken from strm->next_in, plus 64 if
inflate() is currently decoding the last block in the deflate stream, plus
128 if inflate() returned immediately after decoding an end-of-block code or
decoding the complete header up to just before the first byte of the deflate
@ -611,18 +615,21 @@ ZEXTERN int ZEXPORT deflateInit2(z_streamp strm,
The strategy parameter is used to tune the compression algorithm. Use the
value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a
filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no
string match), or Z_RLE to limit match distances to one (run-length
encoding). Filtered data consists mostly of small values with a somewhat
random distribution. In this case, the compression algorithm is tuned to
compress them better. The effect of Z_FILTERED is to force more Huffman
coding and less string matching; it is somewhat intermediate between
Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as
fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The
strategy parameter only affects the compression ratio but not the
correctness of the compressed output even if it is not set appropriately.
Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler
decoder for special applications.
filter (or predictor), Z_RLE to limit match distances to one (run-length
encoding), or Z_HUFFMAN_ONLY to force Huffman encoding only (no string
matching). Filtered data consists mostly of small values with a somewhat
random distribution, as produced by the PNG filters. In this case, the
compression algorithm is tuned to compress them better. The effect of
Z_FILTERED is to force more Huffman coding and less string matching than the
default; it is intermediate between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY.
Z_RLE is almost as fast as Z_HUFFMAN_ONLY, but should give better
compression for PNG image data than Huffman only. The degree of string
matching from most to none is: Z_DEFAULT_STRATEGY, Z_FILTERED, Z_RLE, then
Z_HUFFMAN_ONLY. The strategy parameter affects the compression ratio but
never the correctness of the compressed output, even if it is not set
optimally for the given data. Z_FIXED uses the default string matching, but
prevents the use of dynamic Huffman codes, allowing for a simpler decoder
for special applications.
deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough
memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid
@ -782,8 +789,8 @@ ZEXTERN int ZEXPORT deflateTune(z_streamp strm,
returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream.
*/
ZEXTERN uLong ZEXPORT deflateBound(z_streamp strm,
uLong sourceLen);
ZEXTERN uLong ZEXPORT deflateBound(z_streamp strm, uLong sourceLen);
ZEXTERN z_size_t ZEXPORT deflateBound_z(z_streamp strm, z_size_t sourceLen);
/*
deflateBound() returns an upper bound on the compressed size after
deflation of sourceLen bytes. It must be called after deflateInit() or
@ -795,6 +802,9 @@ ZEXTERN uLong ZEXPORT deflateBound(z_streamp strm,
to return Z_STREAM_END. Note that it is possible for the compressed size to
be larger than the value returned by deflateBound() if flush options other
than Z_FINISH or Z_NO_FLUSH are used.
deflateBound_z() is the same, but takes and returns a size_t length. Note
that a long is 32 bits on Windows.
*/
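
A minimal sketch of the pattern this paragraph describes: size the output with deflateBound() after deflateInit(), then compress in one Z_FINISH call. The helper name deflate_all is hypothetical, and srcLen is assumed to fit in a uInt.

    #include <stdlib.h>
    #include "zlib.h"

    /* One-shot deflate into a buffer sized by deflateBound() (sketch). */
    int deflate_all(const Bytef *src, uLong srcLen,
                    Bytef **out, uLong *outLen) {
        z_stream strm;
        int ret;
        strm.zalloc = Z_NULL;
        strm.zfree  = Z_NULL;
        strm.opaque = Z_NULL;
        ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
        if (ret != Z_OK)
            return ret;
        *outLen = deflateBound(&strm, srcLen);   /* worst-case output size */
        *out = malloc(*outLen);
        if (*out == NULL) {
            deflateEnd(&strm);
            return Z_MEM_ERROR;
        }
        strm.next_in   = (z_const Bytef *)src;
        strm.avail_in  = (uInt)srcLen;           /* assumed to fit */
        strm.next_out  = *out;
        strm.avail_out = (uInt)*outLen;
        ret = deflate(&strm, Z_FINISH);          /* single call suffices */
        *outLen = strm.total_out;
        deflateEnd(&strm);
        return ret == Z_STREAM_END ? Z_OK : Z_BUF_ERROR;
    }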
ZEXTERN int ZEXPORT deflatePending(z_streamp strm,
@ -809,6 +819,21 @@ ZEXTERN int ZEXPORT deflatePending(z_streamp strm,
or bits are Z_NULL, then those values are not set.
deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source
stream state was inconsistent. If an int is 16 bits and memLevel is 9, then
it is possible for the number of pending bytes to not fit in an unsigned. In
that case Z_BUF_ERROR is returned and *pending is set to the maximum value
of an unsigned.
*/
ZEXTERN int ZEXPORT deflateUsed(z_streamp strm,
int *bits);
/*
deflateUsed() returns in *bits the most recent number of deflate bits used
in the last byte when flushing to a byte boundary. The result is in 1..8, or
0 if there has not yet been a flush. This helps determine the location of
the last bit of a deflate stream.
deflateUsed returns Z_OK if success, or Z_STREAM_ERROR if the source
stream state was inconsistent.
*/
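
A short sketch of querying the new deflateUsed() after flushing to a byte boundary; the wrapper name last_byte_bits is hypothetical.

    #include "zlib.h"

    /* After a flush, report how many bits of the last byte written carry
       deflate data (1..8), 0 if nothing has been flushed, -1 on error. */
    int last_byte_bits(z_streamp strm) {
        int bits;
        if (deflateUsed(strm, &bits) != Z_OK)
            return -1;              /* inconsistent stream state */
        return bits;
    }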
@ -1011,13 +1036,15 @@ ZEXTERN int ZEXPORT inflatePrime(z_streamp strm,
int bits,
int value);
/*
This function inserts bits in the inflate input stream. The intent is
that this function is used to start inflating at a bit position in the
middle of a byte. The provided bits will be used before any bytes are used
from next_in. This function should only be used with raw inflate, and
should be used before the first inflate() call after inflateInit2() or
inflateReset(). bits must be less than or equal to 16, and that many of the
least significant bits of value will be inserted in the input.
This function inserts bits in the inflate input stream. The intent is to
use inflatePrime() to start inflating at a bit position in the middle of a
byte. The provided bits will be used before any bytes are used from
next_in. This function should be used with raw inflate, before the first
inflate() call, after inflateInit2() or inflateReset(). It can also be used
after an inflate() return indicates the end of a deflate block or header
when using Z_BLOCK. bits must be less than or equal to 16, and that many of
the least significant bits of value will be inserted in the input. The
other bits in value can be non-zero, and will be ignored.
If bits is negative, then the input stream bit buffer is emptied. Then
inflatePrime() can be called again to put bits in the buffer. This is used
@ -1025,7 +1052,15 @@ ZEXTERN int ZEXPORT inflatePrime(z_streamp strm,
to feeding inflate codes.
inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source
stream state was inconsistent.
stream state was inconsistent, or if bits is out of range. If inflate was
in the middle of processing a header, trailer, or stored block lengths, then
it is possible for there to be only eight bits available in the bit buffer.
In that case, bits > 8 is considered out of range. However, when used as
outlined above, there will always be 16 bits available in the buffer for
insertion. As noted in its documentation above, inflate records the number
of bits in the bit buffer on return in data_type. 32 minus that is the
number of bits available for insertion. inflatePrime does not update
data_type with the new number of bits in the buffer.
*/
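
A sketch of the intended use described above: start raw inflate at a bit position in the middle of a byte. The wrapper name is hypothetical; the caller is assumed to have saved the leftover bits and their count.

    #include "zlib.h"

    /* Begin raw inflate with 'bits' saved bits (bits <= 16) pre-inserted. */
    int start_midbyte(z_streamp strm, int bits, int value) {
        int ret = inflateInit2(strm, -15);       /* raw deflate, 32K window */
        if (ret != Z_OK)
            return ret;
        return inflatePrime(strm, bits, value);
    }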
ZEXTERN long ZEXPORT inflateMark(z_streamp strm);
@ -1071,20 +1106,22 @@ ZEXTERN int ZEXPORT inflateGetHeader(z_streamp strm,
The text, time, xflags, and os fields are filled in with the gzip header
contents. hcrc is set to true if there is a header CRC. (The header CRC
was valid if done is set to one.) If extra is not Z_NULL, then extra_max
contains the maximum number of bytes to write to extra. Once done is true,
extra_len contains the actual extra field length, and extra contains the
extra field, or that field truncated if extra_max is less than extra_len.
If name is not Z_NULL, then up to name_max characters are written there,
terminated with a zero unless the length is greater than name_max. If
comment is not Z_NULL, then up to comm_max characters are written there,
terminated with a zero unless the length is greater than comm_max. When any
of extra, name, or comment are not Z_NULL and the respective field is not
present in the header, then that field is set to Z_NULL to signal its
absence. This allows the use of deflateSetHeader() with the returned
structure to duplicate the header. However if those fields are set to
allocated memory, then the application will need to save those pointers
elsewhere so that they can be eventually freed.
was valid if done is set to one.) The extra, name, and comment pointers
must each be either Z_NULL or point to space to store that information from
the header. If extra is not Z_NULL, then extra_max contains the maximum
number of bytes that can be written to extra. Once done is true, extra_len
contains the actual extra field length, and extra contains the extra field,
or that field truncated if extra_max is less than extra_len. If name is not
Z_NULL, then up to name_max characters, including the terminating zero, are
written there. If comment is not Z_NULL, then up to comm_max characters,
including the terminating zero, are written there. The application can tell
that the name or comment did not fit in the provided space by the absence of
a terminating zero. If any of extra, name, or comment are not present in
the header, then that field's pointer is set to Z_NULL. This allows the use
of deflateSetHeader() with the returned structure to duplicate the header.
Note that if those fields initially pointed to allocated memory, then the
application will need to save them elsewhere so that they can be eventually
freed.
If inflateGetHeader is not used, then the header information is simply
discarded. The header is always checked for validity, including the header
@ -1232,13 +1269,14 @@ ZEXTERN uLong ZEXPORT zlibCompileFlags(void);
21: FASTEST -- deflate algorithm with only one, lowest compression level
22,23: 0 (reserved)
The sprintf variant used by gzprintf (zero is best):
The sprintf variant used by gzprintf (all zeros is best):
24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format
25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure!
25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() is not secure!
26: 0 = returns value, 1 = void -- 1 means inferred string length returned
27: 0 = gzprintf() present, 1 = not -- 1 means gzprintf() returns an error
Remainder:
27-31: 0 (reserved)
28-31: 0 (reserved)
*/
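
A small sketch of testing the gzprintf-related bits, including the new bit 27; the function name is hypothetical.

    #include <stdio.h>
    #include "zlib.h"

    /* Report how gzprintf() was compiled, per bits 24..27 above. */
    void report_gzprintf_flags(void) {
        uLong flags = zlibCompileFlags();
        if (flags & (1L << 27))
            printf("gzprintf() absent; calls return an error\n");
        else if (flags & (1L << 25))
            printf("gzprintf() built with insecure s*printf()\n");
        else
            printf("gzprintf() built with secure *nprintf()\n");
    }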
#ifndef Z_SOLO
@ -1250,11 +1288,14 @@ ZEXTERN uLong ZEXPORT zlibCompileFlags(void);
stream-oriented functions. To simplify the interface, some default options
are assumed (compression level and memory usage, standard memory allocation
functions). The source code of these utility functions can be modified if
you need special options.
you need special options. The _z versions of the functions use the size_t
type for lengths. Note that a long is 32 bits on Windows.
*/
ZEXTERN int ZEXPORT compress(Bytef *dest, uLongf *destLen,
ZEXTERN int ZEXPORT compress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen);
ZEXTERN int ZEXPORT compress_z(Bytef *dest, z_size_t *destLen,
const Bytef *source, z_size_t sourceLen);
/*
Compresses the source buffer into the destination buffer. sourceLen is
the byte length of the source buffer. Upon entry, destLen is the total size
@ -1268,9 +1309,12 @@ ZEXTERN int ZEXPORT compress(Bytef *dest, uLongf *destLen,
buffer.
*/
ZEXTERN int ZEXPORT compress2(Bytef *dest, uLongf *destLen,
ZEXTERN int ZEXPORT compress2(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen,
int level);
ZEXTERN int ZEXPORT compress2_z(Bytef *dest, z_size_t *destLen,
const Bytef *source, z_size_t sourceLen,
int level);
/*
Compresses the source buffer into the destination buffer. The level
parameter has the same meaning as in deflateInit. sourceLen is the byte
@ -1285,21 +1329,24 @@ ZEXTERN int ZEXPORT compress2(Bytef *dest, uLongf *destLen,
*/
ZEXTERN uLong ZEXPORT compressBound(uLong sourceLen);
ZEXTERN z_size_t ZEXPORT compressBound_z(z_size_t sourceLen);
/*
compressBound() returns an upper bound on the compressed size after
compress() or compress2() on sourceLen bytes. It would be used before a
compress() or compress2() call to allocate the destination buffer.
*/
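
A minimal sketch pairing compressBound_z() with compress2_z(), per the paragraph above; the helper name compress_all_z is hypothetical.

    #include <stdlib.h>
    #include "zlib.h"

    /* Allocate a worst-case buffer, then compress in one call (sketch). */
    int compress_all_z(const Bytef *src, z_size_t srcLen,
                       Bytef **out, z_size_t *outLen) {
        *outLen = compressBound_z(srcLen);
        *out = malloc(*outLen);
        if (*out == NULL)
            return Z_MEM_ERROR;
        return compress2_z(*out, outLen, src, srcLen, Z_BEST_COMPRESSION);
    }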
ZEXTERN int ZEXPORT uncompress(Bytef *dest, uLongf *destLen,
ZEXTERN int ZEXPORT uncompress(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong sourceLen);
ZEXTERN int ZEXPORT uncompress_z(Bytef *dest, z_size_t *destLen,
const Bytef *source, z_size_t sourceLen);
/*
Decompresses the source buffer into the destination buffer. sourceLen is
the byte length of the source buffer. Upon entry, destLen is the total size
the byte length of the source buffer. On entry, *destLen is the total size
of the destination buffer, which must be large enough to hold the entire
uncompressed data. (The size of the uncompressed data must have been saved
previously by the compressor and transmitted to the decompressor by some
mechanism outside the scope of this compression library.) Upon exit, destLen
mechanism outside the scope of this compression library.) On exit, *destLen
is the actual size of the uncompressed data.
uncompress returns Z_OK if success, Z_MEM_ERROR if there was not
@ -1309,8 +1356,10 @@ ZEXTERN int ZEXPORT uncompress(Bytef *dest, uLongf *destLen,
buffer with the uncompressed data up to that point.
*/
ZEXTERN int ZEXPORT uncompress2(Bytef *dest, uLongf *destLen,
ZEXTERN int ZEXPORT uncompress2(Bytef *dest, uLongf *destLen,
const Bytef *source, uLong *sourceLen);
ZEXTERN int ZEXPORT uncompress2_z(Bytef *dest, z_size_t *destLen,
const Bytef *source, z_size_t *sourceLen);
/*
Same as uncompress, except that sourceLen is a pointer, where the
length of the source is *sourceLen. On return, *sourceLen is the number of
@ -1338,13 +1387,17 @@ ZEXTERN gzFile ZEXPORT gzopen(const char *path, const char *mode);
'R' for run-length encoding as in "wb1R", or 'F' for fixed code compression
as in "wb9F". (See the description of deflateInit2 for more information
about the strategy parameter.) 'T' will request transparent writing or
appending with no compression and not using the gzip format.
appending with no compression and not using the gzip format. 'T' cannot be
used to force transparent reading. Transparent reading is automatically
performed if there is no gzip header at the start. Transparent reading can
be disabled with the 'G' option, which will instead return an error if there
is no gzip header. 'N' will open the file in non-blocking mode.
"a" can be used instead of "w" to request that the gzip stream that will
be written be appended to the file. "+" will result in an error, since
'a' can be used instead of 'w' to request that the gzip stream that will
be written be appended to the file. '+' will result in an error, since
reading and writing to the same gzip file is not supported. The addition of
"x" when writing will create the file exclusively, which fails if the file
already exists. On systems that support it, the addition of "e" when
'x' when writing will create the file exclusively, which fails if the file
already exists. On systems that support it, the addition of 'e' when
reading or writing will set the flag to close the file on an execve() call.
These functions, as well as gzip, will read and decode a sequence of gzip
@ -1363,14 +1416,22 @@ ZEXTERN gzFile ZEXPORT gzopen(const char *path, const char *mode);
insufficient memory to allocate the gzFile state, or if an invalid mode was
specified (an 'r', 'w', or 'a' was not provided, or '+' was provided).
errno can be checked to determine if the reason gzopen failed was that the
file could not be opened.
file could not be opened. Note that if 'N' is in mode for non-blocking, the
open() itself can fail in order to not block. In that case gzopen() will
return NULL and errno will be EAGAIN or EWOULDBLOCK. The call to gzopen() can
then be re-tried. If the application would like to block on opening the
file, then it can use open() without O_NONBLOCK, and then gzdopen() with the
resulting file descriptor and 'N' in the mode, which will set it to non-
blocking.
*/
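
A sketch combining the mode letters documented above; the file names are hypothetical.

    #include "zlib.h"

    /* 'wb9Fx': write, level 9, fixed codes, exclusive create.
       'rbG' : read, but fail instead of passing through non-gzip input. */
    int open_demo(void) {
        gzFile wf = gzopen("out.gz", "wb9Fx");
        gzFile rf = gzopen("in.gz", "rbG");
        int ok = (wf != NULL && rf != NULL);
        if (wf != NULL) gzclose(wf);
        if (rf != NULL) gzclose(rf);
        return ok ? 0 : -1;
    }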
ZEXTERN gzFile ZEXPORT gzdopen(int fd, const char *mode);
/*
Associate a gzFile with the file descriptor fd. File descriptors are
obtained from calls like open, dup, creat, pipe or fileno (if the file has
been previously opened with fopen). The mode parameter is as in gzopen.
been previously opened with fopen). The mode parameter is as in gzopen. An
'e' in mode will set fd's flag to close the file on an execve() call. An 'N'
in mode will set fd's non-blocking flag.
The next call of gzclose on the returned gzFile will also close the file
descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor
@ -1440,10 +1501,16 @@ ZEXTERN int ZEXPORT gzread(gzFile file, voidp buf, unsigned len);
stream. Alternatively, gzerror can be used before gzclose to detect this
case.
gzread can be used to read a gzip file on a non-blocking device. If the
input stalls and there is no uncompressed data to return, then gzread() will
return -1, and errno will be EAGAIN or EWOULDBLOCK. gzread() can then be
called again.
gzread returns the number of uncompressed bytes actually read, less than
len for end of file, or -1 for error. If len is too large to fit in an int,
then nothing is read, -1 is returned, and the error state is set to
Z_STREAM_ERROR.
Z_STREAM_ERROR. If some data was read before an error, then that data is
returned until exhausted, after which the next call will signal the error.
*/
ZEXTERN z_size_t ZEXPORT gzfread(voidp buf, z_size_t size, z_size_t nitems,
@ -1467,15 +1534,20 @@ ZEXTERN z_size_t ZEXPORT gzfread(voidp buf, z_size_t size, z_size_t nitems,
multiple of size, then the final partial item is nevertheless read into buf
and the end-of-file flag is set. The length of the partial item read is not
provided, but could be inferred from the result of gztell(). This behavior
is the same as the behavior of fread() implementations in common libraries,
but it prevents the direct use of gzfread() to read a concurrently written
file, resetting and retrying on end-of-file, when size is not 1.
is the same as that of fread() implementations in common libraries. This
could result in data loss if used with size != 1 when reading a concurrently
written file or a non-blocking file. In that case, use size == 1 or gzread()
instead.
*/
ZEXTERN int ZEXPORT gzwrite(gzFile file, voidpc buf, unsigned len);
/*
Compress and write the len uncompressed bytes at buf to file. gzwrite
returns the number of uncompressed bytes written or 0 in case of error.
returns the number of uncompressed bytes written, or 0 in case of error or
if len is 0. If the write destination is non-blocking, then gzwrite() may
return a number of bytes written that is not 0 and less than len.
If len does not fit in an int, then 0 is returned and nothing is written.
*/
ZEXTERN z_size_t ZEXPORT gzfwrite(voidpc buf, z_size_t size,
@ -1490,9 +1562,18 @@ ZEXTERN z_size_t ZEXPORT gzfwrite(voidpc buf, z_size_t size,
if there was an error. If the multiplication of size and nitems overflows,
i.e. the product does not fit in a z_size_t, then nothing is written, zero
is returned, and the error state is set to Z_STREAM_ERROR.
If writing a concurrently read file or a non-blocking file with size != 1,
a partial item could be written, with no way of knowing how much of it was
not written, resulting in data loss. In that case, use size == 1 or
gzwrite() instead.
*/
#if defined(STDC) || defined(Z_HAVE_STDARG_H)
ZEXTERN int ZEXPORTVA gzprintf(gzFile file, const char *format, ...);
#else
ZEXTERN int ZEXPORTVA gzprintf();
#endif
/*
Convert, format, compress, and write the arguments (...) to file under
control of the string format, as in fprintf. gzprintf returns the number of
@ -1500,11 +1581,19 @@ ZEXTERN int ZEXPORTVA gzprintf(gzFile file, const char *format, ...);
of error. The number of uncompressed bytes written is limited to 8191, or
one less than the buffer size given to gzbuffer(). The caller should assure
that this limit is not exceeded. If it is exceeded, then gzprintf() will
return an error (0) with nothing written. In this case, there may also be a
buffer overflow with unpredictable consequences, which is possible only if
zlib was compiled with the insecure functions sprintf() or vsprintf(),
because the secure snprintf() or vsnprintf() functions were not available.
This can be determined using zlibCompileFlags().
return an error (0) with nothing written.
In that last case, there may also be a buffer overflow with unpredictable
consequences, which is possible only if zlib was compiled with the insecure
functions sprintf() or vsprintf(), because the secure snprintf() and
vsnprintf() functions were not available. That would only be the case for
a non-ANSI C compiler. zlib may have been built without gzprintf() because
secure functions were not available and having gzprintf() be insecure was
not an option, in which case, gzprintf() returns Z_STREAM_ERROR. All of
these possibilities can be determined using zlibCompileFlags().
If a Z_BUF_ERROR is returned, then nothing was written due to a stall on
the non-blocking write destination.
*/
ZEXTERN int ZEXPORT gzputs(gzFile file, const char *s);
@ -1513,6 +1602,11 @@ ZEXTERN int ZEXPORT gzputs(gzFile file, const char *s);
the terminating null character.
gzputs returns the number of characters written, or -1 in case of error.
The number of characters written may be less than the length of the string
if the write destination is non-blocking.
If the length of the string does not fit in an int, then -1 is returned
and nothing is written.
*/
ZEXTERN char * ZEXPORT gzgets(gzFile file, char *buf, int len);
@ -1525,8 +1619,13 @@ ZEXTERN char * ZEXPORT gzgets(gzFile file, char *buf, int len);
left untouched.
gzgets returns buf which is a null-terminated string, or it returns NULL
for end-of-file or in case of error. If there was an error, the contents at
buf are indeterminate.
for end-of-file or in case of error. If some data was read before an error,
then that data is returned until exhausted, after which the next call will
return NULL to signal the error.
gzgets can be used on a file being concurrently written, and on a non-
blocking device, both as for gzread(). However, lines may be broken in the
middle, leaving it up to the application to reassemble them as needed.
*/
ZEXTERN int ZEXPORT gzputc(gzFile file, int c);
@ -1537,11 +1636,19 @@ ZEXTERN int ZEXPORT gzputc(gzFile file, int c);
ZEXTERN int ZEXPORT gzgetc(gzFile file);
/*
Read and decompress one byte from file. gzgetc returns this byte or -1
in case of end of file or error. This is implemented as a macro for speed.
As such, it does not do all of the checking the other functions do. I.e.
it does not check to see if file is NULL, nor whether the structure file
points to has been clobbered or not.
Read and decompress one byte from file. gzgetc returns this byte or -1 in
case of end of file or error. If some data was read before an error, then
that data is returned until exhausted, after which the next call will return
-1 to signal the error.
This is implemented as a macro for speed. As such, it does not do all of
the checking the other functions do. I.e. it does not check to see if file
is NULL, nor whether the structure file points to has been clobbered or not.
gzgetc can be used to read a gzip file on a non-blocking device. If the
input stalls and there is no uncompressed data to return, then gzgetc() will
return -1, and errno will be EAGAIN or EWOULDBLOCK. gzgetc() can then be
called again.
*/
ZEXTERN int ZEXPORT gzungetc(int c, gzFile file);
@ -1554,6 +1661,11 @@ ZEXTERN int ZEXPORT gzungetc(int c, gzFile file);
output buffer size of pushed characters is allowed. (See gzbuffer above.)
The pushed character will be discarded if the stream is repositioned with
gzseek() or gzrewind().
gzungetc(-1, file) will force any pending seek to execute. Then gztell()
will report the position, even if the requested seek reached end of file.
This can be used to determine the number of uncompressed bytes in a gzip
file without having to read it into a buffer.
*/
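
A sketch of the measurement trick just described: request a seek past the end (seeks are deferred, per gzseek below), force it with gzungetc(-1, file), then read the position with gztell(). The helper name and the OFFSET_MAX stand-in are hypothetical, and the length is assumed to fit in z_off_t.

    #include <stdio.h>   /* SEEK_SET */
    #include "zlib.h"

    #define OFFSET_MAX ((z_off_t)0x7fffffff)    /* stand-in upper bound */

    /* Uncompressed size of a gzip file without buffering its contents. */
    z_off_t gz_uncompressed_size(const char *path) {
        gzFile f = gzopen(path, "rb");
        z_off_t len;
        if (f == NULL)
            return -1;
        (void)gzseek(f, OFFSET_MAX, SEEK_SET);  /* deferred seek past end */
        (void)gzungetc(-1, f);                  /* force the pending seek */
        len = gztell(f);                        /* position reached == size */
        gzclose(f);
        return len;
    }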
ZEXTERN int ZEXPORT gzflush(gzFile file, int flush);
@ -1583,7 +1695,8 @@ ZEXTERN z_off_t ZEXPORT gzseek(gzFile file,
If the file is opened for reading, this function is emulated but can be
extremely slow. If the file is opened for writing, only forward seeks are
supported; gzseek then compresses a sequence of zeroes up to the new
starting position.
starting position. For reading or writing, any actual seeking is deferred
until the next read or write operation, or close operation when writing.
gzseek returns the resulting offset location as measured in bytes from
the beginning of the uncompressed stream, or -1 in case of error, in
@ -1591,7 +1704,7 @@ ZEXTERN z_off_t ZEXPORT gzseek(gzFile file,
would be before the current position.
*/
ZEXTERN int ZEXPORT gzrewind(gzFile file);
ZEXTERN int ZEXPORT gzrewind(gzFile file);
/*
Rewind file. This function is supported only for reading.
@ -1599,7 +1712,7 @@ ZEXTERN int ZEXPORT gzrewind(gzFile file);
*/
/*
ZEXTERN z_off_t ZEXPORT gztell(gzFile file);
ZEXTERN z_off_t ZEXPORT gztell(gzFile file);
Return the starting position for the next gzread or gzwrite on file.
This position represents a number of bytes in the uncompressed data stream,
@ -1644,8 +1757,11 @@ ZEXTERN int ZEXPORT gzdirect(gzFile file);
If gzdirect() is used immediately after gzopen() or gzdopen() it will
cause buffers to be allocated to allow reading the file to determine if it
is a gzip file. Therefore if gzbuffer() is used, it should be called before
gzdirect().
is a gzip file. Therefore if gzbuffer() is used, it should be called before
gzdirect(). If the input is being written concurrently or the device is non-
blocking, then gzdirect() may give a different answer once four bytes of
input have been accumulated, which is what is needed to confirm or deny a
gzip header. Before this, gzdirect() will return true (1).
When writing, gzdirect() returns true (1) if transparent writing was
requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note:
@ -1655,7 +1771,7 @@ ZEXTERN int ZEXPORT gzdirect(gzFile file);
gzip file reading and decompression, which may not be desired.)
*/
ZEXTERN int ZEXPORT gzclose(gzFile file);
ZEXTERN int ZEXPORT gzclose(gzFile file);
/*
Flush all pending output for file, if necessary, close file and
deallocate the (de)compression state. Note that once file is closed, you
@ -1683,9 +1799,10 @@ ZEXTERN int ZEXPORT gzclose_w(gzFile file);
ZEXTERN const char * ZEXPORT gzerror(gzFile file, int *errnum);
/*
Return the error message for the last error which occurred on file.
errnum is set to zlib error number. If an error occurred in the file system
and not in the compression library, errnum is set to Z_ERRNO and the
application may consult errno to get the exact error code.
If errnum is not NULL, *errnum is set to zlib error number. If an error
occurred in the file system and not in the compression library, *errnum is
set to Z_ERRNO and the application may consult errno to get the exact error
code.
The application must not modify the returned string. Future calls to
this function may invalidate the previously returned string. If file is
@ -1736,7 +1853,8 @@ ZEXTERN uLong ZEXPORT adler32(uLong adler, const Bytef *buf, uInt len);
ZEXTERN uLong ZEXPORT adler32_z(uLong adler, const Bytef *buf,
z_size_t len);
/*
Same as adler32(), but with a size_t length.
Same as adler32(), but with a size_t length. Note that a long is 32 bits
on Windows.
*/
/*
@ -1772,7 +1890,8 @@ ZEXTERN uLong ZEXPORT crc32(uLong crc, const Bytef *buf, uInt len);
ZEXTERN uLong ZEXPORT crc32_z(uLong crc, const Bytef *buf,
z_size_t len);
/*
Same as crc32(), but with a size_t length.
Same as crc32(), but with a size_t length. Note that a long is 32 bits on
Windows.
*/
/*
@ -1782,14 +1901,14 @@ ZEXTERN uLong ZEXPORT crc32_combine(uLong crc1, uLong crc2, z_off_t len2);
seq1 and seq2 with lengths len1 and len2, CRC-32 check values were
calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32
check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and
len2. len2 must be non-negative.
len2. len2 must be non-negative, otherwise zero is returned.
*/
/*
ZEXTERN uLong ZEXPORT crc32_combine_gen(z_off_t len2);
Return the operator corresponding to length len2, to be used with
crc32_combine_op(). len2 must be non-negative.
crc32_combine_op(). len2 must be non-negative, otherwise zero is returned.
*/
ZEXTERN uLong ZEXPORT crc32_combine_op(uLong crc1, uLong crc2, uLong op);
@ -1912,9 +2031,9 @@ ZEXTERN int ZEXPORT gzgetc_(gzFile file); /* backward compatibility */
ZEXTERN z_off_t ZEXPORT gzseek64(gzFile, z_off_t, int);
ZEXTERN z_off_t ZEXPORT gztell64(gzFile);
ZEXTERN z_off_t ZEXPORT gzoffset64(gzFile);
ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off_t);
ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off_t);
ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off_t);
ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off64_t);
ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off64_t);
ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off64_t);
# endif
#else
ZEXTERN gzFile ZEXPORT gzopen(const char *, const char *);

View File

@ -23,7 +23,7 @@
*/
/* zutil.c -- target dependent utility functions for the compression library
* Copyright (C) 1995-2017 Jean-loup Gailly
* Copyright (C) 1995-2026 Jean-loup Gailly
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -110,28 +110,36 @@ uLong ZEXPORT zlibCompileFlags(void) {
flags += 1L << 21;
#endif
#if defined(STDC) || defined(Z_HAVE_STDARG_H)
# ifdef NO_vsnprintf
flags += 1L << 25;
# ifdef HAS_vsprintf_void
flags += 1L << 26;
# endif
# else
# ifdef HAS_vsnprintf_void
flags += 1L << 26;
# endif
# endif
# ifdef NO_vsnprintf
# ifdef ZLIB_INSECURE
flags += 1L << 25;
# else
flags += 1L << 27;
# endif
# ifdef HAS_vsprintf_void
flags += 1L << 26;
# endif
# else
# ifdef HAS_vsnprintf_void
flags += 1L << 26;
# endif
# endif
#else
flags += 1L << 24;
# ifdef NO_snprintf
flags += 1L << 25;
# ifdef HAS_sprintf_void
flags += 1L << 26;
# endif
# else
# ifdef HAS_snprintf_void
flags += 1L << 26;
# endif
# endif
# ifdef NO_snprintf
# ifdef ZLIB_INSECURE
flags += 1L << 25;
# else
flags += 1L << 27;
# endif
# ifdef HAS_sprintf_void
flags += 1L << 26;
# endif
# else
# ifdef HAS_snprintf_void
flags += 1L << 26;
# endif
# endif
#endif
return flags;
}
@ -166,28 +174,34 @@ const char * ZEXPORT zError(int err) {
#ifndef HAVE_MEMCPY
void ZLIB_INTERNAL zmemcpy(Bytef* dest, const Bytef* source, uInt len) {
if (len == 0) return;
do {
*dest++ = *source++; /* ??? to be unrolled */
} while (--len != 0);
void ZLIB_INTERNAL zmemcpy(void FAR *dst, const void FAR *src, z_size_t n) {
uchf *p = dst;
const uchf *q = src;
while (n) {
*p++ = *q++;
n--;
}
}
int ZLIB_INTERNAL zmemcmp(const Bytef* s1, const Bytef* s2, uInt len) {
uInt j;
for (j = 0; j < len; j++) {
if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1;
int ZLIB_INTERNAL zmemcmp(const void FAR *s1, const void FAR *s2, z_size_t n) {
const uchf *p = s1, *q = s2;
while (n) {
if (*p++ != *q++)
return (int)p[-1] - (int)q[-1];
n--;
}
return 0;
}
void ZLIB_INTERNAL zmemzero(Bytef* dest, uInt len) {
void ZLIB_INTERNAL zmemzero(void FAR *b, z_size_t len) {
uchf *p = b;
if (len == 0) return;
do {
*dest++ = 0; /* ??? to be unrolled */
} while (--len != 0);
while (len) {
*p++ = 0;
len--;
}
}
#endif
#ifndef Z_SOLO

View File

@ -23,7 +23,7 @@
*/
/* zutil.h -- internal interface and configuration of the compression library
* Copyright (C) 1995-2024 Jean-loup Gailly, Mark Adler
* Copyright (C) 1995-2026 Jean-loup Gailly, Mark Adler
* For conditions of distribution and use, see copyright notice in zlib.h
*/
@ -60,6 +60,10 @@
define "local" for the non-static meaning of "static", for readability
(compile with -Dlocal if your debugger can't find static symbols) */
extern const char deflate_copyright[];
extern const char inflate_copyright[];
extern const char inflate9_copyright[];
typedef unsigned char uch;
typedef uch FAR uchf;
typedef unsigned short ush;
@ -72,6 +76,8 @@ typedef unsigned long ulg;
# define Z_U8 unsigned long
# elif (ULLONG_MAX == 0xffffffffffffffff)
# define Z_U8 unsigned long long
# elif (ULONG_LONG_MAX == 0xffffffffffffffff)
# define Z_U8 unsigned long long
# elif (UINT_MAX == 0xffffffffffffffff)
# define Z_U8 unsigned
# endif
@ -87,7 +93,9 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
/* To be used only when the state is known to be valid */
/* common constants */
#if MAX_WBITS < 9 || MAX_WBITS > 15
# error MAX_WBITS must be in 9..15
#endif
#ifndef DEF_WBITS
# define DEF_WBITS MAX_WBITS
#endif
@ -165,7 +173,7 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
# define OS_CODE 7
#endif
#ifdef __acorn
#if defined(__acorn) || defined(__riscos)
# define OS_CODE 13
#endif
@ -192,11 +200,10 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
#endif
/* provide prototypes for these when building zlib without LFS */
#if !defined(_WIN32) && \
(!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0)
ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off_t);
ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off_t);
ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off_t);
#ifndef Z_LARGE64
ZEXTERN uLong ZEXPORT adler32_combine64(uLong, uLong, z_off64_t);
ZEXTERN uLong ZEXPORT crc32_combine64(uLong, uLong, z_off64_t);
ZEXTERN uLong ZEXPORT crc32_combine_gen64(z_off64_t);
#endif
/* common defaults */
@ -235,9 +242,9 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
# define zmemzero(dest, len) memset(dest, 0, len)
# endif
#else
void ZLIB_INTERNAL zmemcpy(Bytef* dest, const Bytef* source, uInt len);
int ZLIB_INTERNAL zmemcmp(const Bytef* s1, const Bytef* s2, uInt len);
void ZLIB_INTERNAL zmemzero(Bytef* dest, uInt len);
void ZLIB_INTERNAL zmemcpy(void FAR *, const void FAR *, z_size_t);
int ZLIB_INTERNAL zmemcmp(const void FAR *, const void FAR *, z_size_t);
void ZLIB_INTERNAL zmemzero(void FAR *, z_size_t);
#endif
/* Diagnostic functions */
@ -275,4 +282,74 @@ extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */
#define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \
(((q) & 0xff00) << 8) + (((q) & 0xff) << 24))
#ifdef Z_ONCE
/*
Create a local z_once() function depending on the availability of atomics.
*/
/* Check for the availability of atomics. */
#if defined(__STDC__) && __STDC_VERSION__ >= 201112L && \
!defined(__STDC_NO_ATOMICS__)
#include <stdatomic.h>
typedef struct {
atomic_flag begun;
atomic_int done;
} z_once_t;
#define Z_ONCE_INIT {ATOMIC_FLAG_INIT, 0}
/*
Run the provided init() function exactly once, even if multiple threads
invoke once() at the same time. The state must be a once_t initialized with
Z_ONCE_INIT.
*/
local void z_once(z_once_t *state, void (*init)(void)) {
if (!atomic_load(&state->done)) {
if (atomic_flag_test_and_set(&state->begun))
while (!atomic_load(&state->done))
;
else {
init();
atomic_store(&state->done, 1);
}
}
}
#else /* no atomics */
#warning zlib not thread-safe
typedef struct z_once_s {
volatile int begun;
volatile int done;
} z_once_t;
#define Z_ONCE_INIT {0, 0}
/* Test and set. Alas, not atomic, but tries to limit the period of
vulnerability. */
local int test_and_set(int volatile *flag) {
int was;
was = *flag;
*flag = 1;
return was;
}
/* Run the provided init() function once. This is not thread-safe. */
local void z_once(z_once_t *state, void (*init)(void)) {
if (!state->done) {
if (test_and_set(&state->begun))
while (!state->done)
;
else {
init();
state->done = 1;
}
}
}
#endif /* ?atomics */
#endif /* Z_ONCE */
#endif /* ZUTIL_H */
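
For context, this is the pattern crc32.c uses above: a translation unit inside the zlib build defines Z_ONCE before including zutil.h, then guards its one-time initialization with z_once(). The names table_made and make_tables are hypothetical.

    /* Internal to the zlib build; not part of the public API. */
    #define Z_ONCE
    #include "zutil.h"

    local z_once_t table_made = Z_ONCE_INIT;

    local void make_tables(void) {
        /* fill in lookup tables exactly once */
    }

    local void use_tables(void) {
        z_once(&table_made, make_tables);  /* thread-safe with C11 atomics */
        /* tables are initialized past this point */
    }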

Some files were not shown because too many files have changed in this diff