From 055b4b426cbc56d97e82219f3dd3aba1ebf977e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20Jeli=C5=84ski?= Date: Mon, 3 Jul 2023 07:51:38 +0000 Subject: [PATCH] 8310948: Fix ignored-qualifiers warning in Hotspot Reviewed-by: kbarrett, dholmes --- make/hotspot/lib/CompileJvm.gmk | 4 +-- src/hotspot/cpu/aarch64/aarch64.ad | 16 +++++----- src/hotspot/cpu/aarch64/aarch64_vector.ad | 8 ++--- src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 | 8 ++--- src/hotspot/cpu/aarch64/assembler_aarch64.hpp | 2 +- src/hotspot/cpu/aarch64/matcher_aarch64.hpp | 6 ++-- src/hotspot/cpu/arm/arm.ad | 24 +++++++-------- src/hotspot/cpu/arm/assembler_arm.hpp | 2 +- src/hotspot/cpu/ppc/matcher_ppc.hpp | 4 +-- src/hotspot/cpu/ppc/ppc.ad | 24 +++++++-------- src/hotspot/cpu/riscv/assembler_riscv.hpp | 2 +- .../cpu/riscv/javaFrameAnchor_riscv.hpp | 2 +- src/hotspot/cpu/riscv/matcher_riscv.hpp | 6 ++-- src/hotspot/cpu/riscv/riscv.ad | 16 +++++----- src/hotspot/cpu/riscv/riscv_v.ad | 8 ++--- src/hotspot/cpu/riscv/vm_version_riscv.hpp | 6 ++-- src/hotspot/cpu/s390/assembler_s390.hpp | 2 +- src/hotspot/cpu/s390/matcher_s390.hpp | 6 ++-- src/hotspot/cpu/s390/s390.ad | 24 +++++++-------- .../x86/gc/shared/barrierSetNMethod_x86.cpp | 2 +- src/hotspot/cpu/x86/matcher_x86.hpp | 8 ++--- src/hotspot/cpu/x86/x86.ad | 22 +++++++------- src/hotspot/cpu/x86/x86_32.ad | 2 +- src/hotspot/cpu/x86/x86_64.ad | 2 +- src/hotspot/share/adlc/output_c.cpp | 8 ++--- src/hotspot/share/c1/c1_ValueType.hpp | 20 ++++++------- .../share/classfile/classFileParser.cpp | 4 +-- .../share/classfile/classFileStream.cpp | 2 +- .../share/classfile/classFileStream.hpp | 2 +- src/hotspot/share/compiler/compilerOracle.cpp | 2 +- src/hotspot/share/gc/g1/g1ConcurrentMark.cpp | 4 +-- src/hotspot/share/gc/g1/g1HRPrinter.hpp | 2 +- .../share/gc/parallel/psParallelCompact.hpp | 2 +- src/hotspot/share/gc/parallel/psScavenge.hpp | 4 +-- .../share/gc/serial/defNewGeneration.hpp | 2 +- src/hotspot/share/gc/serial/markSweep.hpp | 2 +- src/hotspot/share/gc/shared/gcHeapSummary.hpp | 10 +++---- src/hotspot/share/gc/shared/plab.hpp | 4 +-- src/hotspot/share/gc/shared/workerThread.hpp | 2 +- .../share/gc/shenandoah/shenandoahHeap.hpp | 4 +-- .../gc/shenandoah/shenandoahHeap.inline.hpp | 4 +-- .../gc/shenandoah/shenandoahHeapRegion.hpp | 2 +- src/hotspot/share/gc/x/xRelocate.cpp | 4 +-- src/hotspot/share/gc/z/zRelocate.cpp | 4 +-- src/hotspot/share/jfr/jni/jfrUpcalls.cpp | 20 ++++++------- .../share/jfr/leakprofiler/chains/edge.cpp | 4 +-- .../share/jfr/leakprofiler/chains/edge.hpp | 4 +-- .../jfr/leakprofiler/chains/edgeUtils.cpp | 2 +- .../checkpoint/objectSampleWriter.cpp | 4 +-- .../leakprofiler/sampling/objectSample.cpp | 2 +- .../leakprofiler/sampling/objectSample.hpp | 4 +-- .../share/jfr/periodic/jfrModuleEvent.cpp | 2 +- .../checkpoint/types/jfrThreadGroup.cpp | 10 +++---- .../share/jfr/support/jfrJdkJfrEvent.cpp | 2 +- .../share/jfr/support/jfrTraceIdExtension.hpp | 2 +- src/hotspot/share/oops/instanceKlass.hpp | 2 +- src/hotspot/share/oops/method.hpp | 2 +- src/hotspot/share/oops/method.inline.hpp | 2 +- src/hotspot/share/opto/callnode.hpp | 4 +-- src/hotspot/share/opto/castnode.hpp | 2 +- src/hotspot/share/opto/cfgnode.hpp | 8 ++--- src/hotspot/share/opto/machnode.hpp | 6 ++-- src/hotspot/share/opto/matcher.cpp | 2 +- src/hotspot/share/opto/matcher.hpp | 30 +++++++++---------- src/hotspot/share/opto/node.hpp | 4 +-- src/hotspot/share/opto/type.hpp | 4 +-- src/hotspot/share/prims/jvmtiEnter.xsl | 2 +- src/hotspot/share/prims/jvmtiTagMap.cpp | 8 ++--- 
src/hotspot/share/prims/jvmtiUtil.hpp | 4 +-- src/hotspot/share/runtime/threadSMR.hpp | 6 ++-- src/hotspot/share/services/memoryService.hpp | 4 +-- src/hotspot/share/utilities/bitMap.hpp | 2 +- src/hotspot/share/utilities/bitMap.inline.hpp | 2 +- src/hotspot/share/utilities/elfFile.hpp | 2 +- 74 files changed, 223 insertions(+), 223 deletions(-) diff --git a/make/hotspot/lib/CompileJvm.gmk b/make/hotspot/lib/CompileJvm.gmk index 9f95eccd5d9..9a70dfd960e 100644 --- a/make/hotspot/lib/CompileJvm.gmk +++ b/make/hotspot/lib/CompileJvm.gmk @@ -84,11 +84,11 @@ CFLAGS_VM_VERSION := \ # Disabled warnings DISABLED_WARNINGS_gcc := array-bounds comment delete-non-virtual-dtor \ - empty-body ignored-qualifiers implicit-fallthrough int-in-bool-context \ + empty-body implicit-fallthrough int-in-bool-context \ maybe-uninitialized missing-field-initializers parentheses \ shift-negative-value unknown-pragmas -DISABLED_WARNINGS_clang := ignored-qualifiers sometimes-uninitialized \ +DISABLED_WARNINGS_clang := sometimes-uninitialized \ missing-braces delete-non-abstract-non-virtual-dtor unknown-pragmas ifneq ($(DEBUG_LEVEL), release) diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad index fea6a3d7d11..2357c33fc8d 100644 --- a/src/hotspot/cpu/aarch64/aarch64.ad +++ b/src/hotspot/cpu/aarch64/aarch64.ad @@ -2285,7 +2285,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) //============================================================================= -const bool Matcher::match_rule_supported(int opcode) { +bool Matcher::match_rule_supported(int opcode) { if (!has_match_rule(opcode)) return false; @@ -2320,7 +2320,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) } // Vector calling convention not yet implemented. -const bool Matcher::supports_vector_calling_convention(void) { +bool Matcher::supports_vector_calling_convention(void) { return false; } @@ -2340,7 +2340,7 @@ bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { } // Vector width in bytes. -const int Matcher::vector_width_in_bytes(BasicType bt) { +int Matcher::vector_width_in_bytes(BasicType bt) { // The MaxVectorSize should have been set by detecting SVE max vector register size. int size = MIN2((UseSVE > 0) ? 256 : 16, (int)MaxVectorSize); // Minimum 2 values in vector @@ -2351,11 +2351,11 @@ const int Matcher::vector_width_in_bytes(BasicType bt) { } // Limits on vector size (number of elements) loaded into vector. -const int Matcher::max_vector_size(const BasicType bt) { +int Matcher::max_vector_size(const BasicType bt) { return vector_width_in_bytes(bt)/type2aelembytes(bt); } -const int Matcher::min_vector_size(const BasicType bt) { +int Matcher::min_vector_size(const BasicType bt) { int max_size = max_vector_size(bt); // Limit the min vector size to 8 bytes. int size = 8 / type2aelembytes(bt); @@ -2370,17 +2370,17 @@ const int Matcher::min_vector_size(const BasicType bt) { return MIN2(size, max_size); } -const int Matcher::superword_max_vector_size(const BasicType bt) { +int Matcher::superword_max_vector_size(const BasicType bt) { return Matcher::max_vector_size(bt); } // Actual max scalable vector register length. -const int Matcher::scalable_vector_reg_size(const BasicType bt) { +int Matcher::scalable_vector_reg_size(const BasicType bt) { return Matcher::max_vector_size(bt); } // Vector ideal reg. 
-const uint Matcher::vector_ideal_reg(int len) { +uint Matcher::vector_ideal_reg(int len) { if (UseSVE > 0 && 16 < len && len <= 256) { return Op_VecA; } diff --git a/src/hotspot/cpu/aarch64/aarch64_vector.ad b/src/hotspot/cpu/aarch64/aarch64_vector.ad index a18f12d3f6d..947756d7504 100644 --- a/src/hotspot/cpu/aarch64/aarch64_vector.ad +++ b/src/hotspot/cpu/aarch64/aarch64_vector.ad @@ -125,7 +125,7 @@ source %{ } } - const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { + bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { if (UseSVE == 0) { // These operations are not profitable to be vectorized on NEON, because no direct // NEON instructions support them. But the match rule support for them is profitable for @@ -148,7 +148,7 @@ source %{ // Identify extra cases that we might want to provide match rules for vector nodes and // other intrinsics guarded with vector length (vlen) and element type (bt). - const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { + bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { if (!match_rule_supported(opcode)) { return false; } @@ -232,7 +232,7 @@ source %{ return vector_size_supported(bt, vlen); } - const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { + bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { // Only SVE supports masked operations. if (UseSVE == 0) { return false; @@ -271,7 +271,7 @@ source %{ return match_rule_supported_vector(opcode, vlen, bt); } - const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { + bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { // Only SVE has partial vector operations if (UseSVE == 0) { return false; diff --git a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 index 10e7dbca6bb..ebecd64cb93 100644 --- a/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 +++ b/src/hotspot/cpu/aarch64/aarch64_vector_ad.m4 @@ -115,7 +115,7 @@ source %{ } } - const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { + bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { if (UseSVE == 0) { // These operations are not profitable to be vectorized on NEON, because no direct // NEON instructions support them. But the match rule support for them is profitable for @@ -138,7 +138,7 @@ source %{ // Identify extra cases that we might want to provide match rules for vector nodes and // other intrinsics guarded with vector length (vlen) and element type (bt). - const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { + bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { if (!match_rule_supported(opcode)) { return false; } @@ -222,7 +222,7 @@ source %{ return vector_size_supported(bt, vlen); } - const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { + bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { // Only SVE supports masked operations. 
if (UseSVE == 0) { return false; @@ -261,7 +261,7 @@ source %{ return match_rule_supported_vector(opcode, vlen, bt); } - const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { + bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { // Only SVE has partial vector operations if (UseSVE == 0) { return false; diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp index a53d8329645..7ea4d523996 100644 --- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp @@ -4211,7 +4211,7 @@ Instruction_aarch64::~Instruction_aarch64() { #undef starti // Invert a condition -inline const Assembler::Condition operator~(const Assembler::Condition cond) { +inline Assembler::Condition operator~(const Assembler::Condition cond) { return Assembler::Condition(int(cond) ^ 1); } diff --git a/src/hotspot/cpu/aarch64/matcher_aarch64.hpp b/src/hotspot/cpu/aarch64/matcher_aarch64.hpp index b05f1a47e4a..aa22459d217 100644 --- a/src/hotspot/cpu/aarch64/matcher_aarch64.hpp +++ b/src/hotspot/cpu/aarch64/matcher_aarch64.hpp @@ -33,7 +33,7 @@ // Whether this platform implements the scalable vector feature static const bool implements_scalable_vector = true; - static const bool supports_scalable_vector() { + static bool supports_scalable_vector() { return UseSVE > 0; } @@ -144,12 +144,12 @@ } // Does the CPU supports vector unsigned comparison instructions? - static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) { + static constexpr bool supports_vector_comparison_unsigned(int vlen, BasicType bt) { return true; } // Some microarchitectures have mask registers used on vectors - static const bool has_predicated_vectors(void) { + static bool has_predicated_vectors(void) { return UseSVE > 0; } diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad index 4b41f001942..2c86e6be7f1 100644 --- a/src/hotspot/cpu/arm/arm.ad +++ b/src/hotspot/cpu/arm/arm.ad @@ -947,7 +947,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) { return offset; } -const bool Matcher::match_rule_supported(int opcode) { +bool Matcher::match_rule_supported(int opcode) { if (!has_match_rule(opcode)) return false; @@ -1002,11 +1002,11 @@ const bool Matcher::match_rule_supported(int opcode) { return true; // Per default match rules are supported. } -const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { return match_rule_supported_vector(opcode, vlen, bt); } -const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { // TODO // identify extra cases that we might want to provide match rules for @@ -1017,11 +1017,11 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType return ret_value; // Per default match rules are supported. 
} -const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { return false; } -const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { +bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { return false; } @@ -1034,7 +1034,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) } // Vector calling convention not yet implemented. -const bool Matcher::supports_vector_calling_convention(void) { +bool Matcher::supports_vector_calling_convention(void) { return false; } @@ -1044,16 +1044,16 @@ OptoRegPair Matcher::vector_return_value(uint ideal_reg) { } // Vector width in bytes -const int Matcher::vector_width_in_bytes(BasicType bt) { +int Matcher::vector_width_in_bytes(BasicType bt) { return MaxVectorSize; } -const int Matcher::scalable_vector_reg_size(const BasicType bt) { +int Matcher::scalable_vector_reg_size(const BasicType bt) { return -1; } // Vector ideal reg corresponding to specified size in bytes -const uint Matcher::vector_ideal_reg(int size) { +uint Matcher::vector_ideal_reg(int size) { assert(MaxVectorSize >= size, ""); switch(size) { case 8: return Op_VecD; @@ -1064,17 +1064,17 @@ const uint Matcher::vector_ideal_reg(int size) { } // Limits on vector size (number of elements) loaded into vector. -const int Matcher::max_vector_size(const BasicType bt) { +int Matcher::max_vector_size(const BasicType bt) { assert(is_java_primitive(bt), "only primitive type vectors"); return vector_width_in_bytes(bt)/type2aelembytes(bt); } -const int Matcher::min_vector_size(const BasicType bt) { +int Matcher::min_vector_size(const BasicType bt) { assert(is_java_primitive(bt), "only primitive type vectors"); return 8/type2aelembytes(bt); } -const int Matcher::superword_max_vector_size(const BasicType bt) { +int Matcher::superword_max_vector_size(const BasicType bt) { return Matcher::max_vector_size(bt); } diff --git a/src/hotspot/cpu/arm/assembler_arm.hpp b/src/hotspot/cpu/arm/assembler_arm.hpp index 818d6a8f44b..b1b4199e88d 100644 --- a/src/hotspot/cpu/arm/assembler_arm.hpp +++ b/src/hotspot/cpu/arm/assembler_arm.hpp @@ -245,7 +245,7 @@ class Address { bool uses(Register reg) const { return _base == reg || _index == reg; } - const relocInfo::relocType rtype() { return _rspec.type(); } + relocInfo::relocType rtype() { return _rspec.type(); } const RelocationHolder& rspec() { return _rspec; } // Convert the raw encoding form into the form expected by the diff --git a/src/hotspot/cpu/ppc/matcher_ppc.hpp b/src/hotspot/cpu/ppc/matcher_ppc.hpp index c2486a0bc48..44d1a3cd305 100644 --- a/src/hotspot/cpu/ppc/matcher_ppc.hpp +++ b/src/hotspot/cpu/ppc/matcher_ppc.hpp @@ -39,7 +39,7 @@ // PPC implementation uses VSX load/store instructions (if // SuperwordUseVSX) which support 4 byte but not arbitrary alignment - static const bool misaligned_vectors_ok() { + static constexpr bool misaligned_vectors_ok() { return false; } @@ -155,7 +155,7 @@ // true means we have fast l2f conversion // false means that conversion is done by runtime call - static const bool convL2FSupported(void) { + static bool convL2FSupported(void) { // fcfids can do the conversion (>= Power7). // fcfid + frsp showed rounding problem when result should be 0x3f800001. 
return VM_Version::has_fcfids(); diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad index d9e808e4a22..c8f8bf35f22 100644 --- a/src/hotspot/cpu/ppc/ppc.ad +++ b/src/hotspot/cpu/ppc/ppc.ad @@ -2100,7 +2100,7 @@ static int frame_slots_bias(int reg_enc, PhaseRegAlloc* ra_) { return 0; } -const bool Matcher::match_rule_supported(int opcode) { +bool Matcher::match_rule_supported(int opcode) { if (!has_match_rule(opcode)) { return false; // no match rule present } @@ -2170,22 +2170,22 @@ const bool Matcher::match_rule_supported(int opcode) { return true; // Per default match rules are supported. } -const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { return match_rule_supported_vector(opcode, vlen, bt); } -const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) { return false; } return true; // Per default match rules are supported. } -const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { return false; } -const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { +bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { return false; } @@ -2198,7 +2198,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) } // Vector calling convention not yet implemented. -const bool Matcher::supports_vector_calling_convention(void) { +bool Matcher::supports_vector_calling_convention(void) { return false; } @@ -2208,7 +2208,7 @@ OptoRegPair Matcher::vector_return_value(uint ideal_reg) { } // Vector width in bytes. -const int Matcher::vector_width_in_bytes(BasicType bt) { +int Matcher::vector_width_in_bytes(BasicType bt) { if (SuperwordUseVSX) { assert(MaxVectorSize == 16, ""); return 16; @@ -2219,7 +2219,7 @@ const int Matcher::vector_width_in_bytes(BasicType bt) { } // Vector ideal reg. -const uint Matcher::vector_ideal_reg(int size) { +uint Matcher::vector_ideal_reg(int size) { if (SuperwordUseVSX) { assert(MaxVectorSize == 16 && size == 16, ""); return Op_VecX; @@ -2230,20 +2230,20 @@ const uint Matcher::vector_ideal_reg(int size) { } // Limits on vector size (number of elements) loaded into vector. -const int Matcher::max_vector_size(const BasicType bt) { +int Matcher::max_vector_size(const BasicType bt) { assert(is_java_primitive(bt), "only primitive type vectors"); return vector_width_in_bytes(bt)/type2aelembytes(bt); } -const int Matcher::min_vector_size(const BasicType bt) { +int Matcher::min_vector_size(const BasicType bt) { return max_vector_size(bt); // Same as max. 
} -const int Matcher::superword_max_vector_size(const BasicType bt) { +int Matcher::superword_max_vector_size(const BasicType bt) { return Matcher::max_vector_size(bt); } -const int Matcher::scalable_vector_reg_size(const BasicType bt) { +int Matcher::scalable_vector_reg_size(const BasicType bt) { return -1; } diff --git a/src/hotspot/cpu/riscv/assembler_riscv.hpp b/src/hotspot/cpu/riscv/assembler_riscv.hpp index 02fbf5bfbf7..1fa89b0ae7b 100644 --- a/src/hotspot/cpu/riscv/assembler_riscv.hpp +++ b/src/hotspot/cpu/riscv/assembler_riscv.hpp @@ -270,7 +270,7 @@ class Address { return _mode != literal && base() == reg; } - const address target() const { + address target() const { assert_is_literal(); return _literal._target; } diff --git a/src/hotspot/cpu/riscv/javaFrameAnchor_riscv.hpp b/src/hotspot/cpu/riscv/javaFrameAnchor_riscv.hpp index f276d51b169..1293fae0c0b 100644 --- a/src/hotspot/cpu/riscv/javaFrameAnchor_riscv.hpp +++ b/src/hotspot/cpu/riscv/javaFrameAnchor_riscv.hpp @@ -71,7 +71,7 @@ public: intptr_t* last_Java_sp(void) const { return _last_Java_sp; } - const address last_Java_pc(void) { return _last_Java_pc; } + address last_Java_pc(void) { return _last_Java_pc; } private: diff --git a/src/hotspot/cpu/riscv/matcher_riscv.hpp b/src/hotspot/cpu/riscv/matcher_riscv.hpp index dc1432346f1..1da8f003122 100644 --- a/src/hotspot/cpu/riscv/matcher_riscv.hpp +++ b/src/hotspot/cpu/riscv/matcher_riscv.hpp @@ -34,7 +34,7 @@ // Whether this platform implements the scalable vector feature static const bool implements_scalable_vector = true; - static const bool supports_scalable_vector() { + static bool supports_scalable_vector() { return UseRVV; } @@ -143,12 +143,12 @@ } // Does the CPU supports vector unsigned comparison instructions? - static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) { + static constexpr bool supports_vector_comparison_unsigned(int vlen, BasicType bt) { return false; } // Some microarchitectures have mask registers used on vectors - static const bool has_predicated_vectors(void) { + static bool has_predicated_vectors(void) { return UseRVV; } diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad index 6678d591625..15c1267997c 100644 --- a/src/hotspot/cpu/riscv/riscv.ad +++ b/src/hotspot/cpu/riscv/riscv.ad @@ -1872,7 +1872,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) //============================================================================= -const bool Matcher::match_rule_supported(int opcode) { +bool Matcher::match_rule_supported(int opcode) { if (!has_match_rule(opcode)) { return false; } @@ -1921,7 +1921,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) } // Vector calling convention not yet implemented. -const bool Matcher::supports_vector_calling_convention(void) { +bool Matcher::supports_vector_calling_convention(void) { return false; } @@ -1954,7 +1954,7 @@ bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { } // Vector width in bytes. -const int Matcher::vector_width_in_bytes(BasicType bt) { +int Matcher::vector_width_in_bytes(BasicType bt) { if (UseRVV) { // The MaxVectorSize should have been set by detecting RVV max vector register size when check UseRVV. // MaxVectorSize == VM_Version::_initial_vector_length @@ -1969,11 +1969,11 @@ const int Matcher::vector_width_in_bytes(BasicType bt) { } // Limits on vector size (number of elements) loaded into vector. 
-const int Matcher::max_vector_size(const BasicType bt) { +int Matcher::max_vector_size(const BasicType bt) { return vector_width_in_bytes(bt) / type2aelembytes(bt); } -const int Matcher::min_vector_size(const BasicType bt) { +int Matcher::min_vector_size(const BasicType bt) { int max_size = max_vector_size(bt); // Limit the min vector size to 8 bytes. int size = 8 / type2aelembytes(bt); @@ -1988,12 +1988,12 @@ const int Matcher::min_vector_size(const BasicType bt) { return MIN2(size, max_size); } -const int Matcher::superword_max_vector_size(const BasicType bt) { +int Matcher::superword_max_vector_size(const BasicType bt) { return Matcher::max_vector_size(bt); } // Vector ideal reg. -const uint Matcher::vector_ideal_reg(int len) { +uint Matcher::vector_ideal_reg(int len) { assert(MaxVectorSize >= len, ""); if (UseRVV) { return Op_VecA; @@ -2003,7 +2003,7 @@ const uint Matcher::vector_ideal_reg(int len) { return 0; } -const int Matcher::scalable_vector_reg_size(const BasicType bt) { +int Matcher::scalable_vector_reg_size(const BasicType bt) { return Matcher::max_vector_size(bt); } diff --git a/src/hotspot/cpu/riscv/riscv_v.ad b/src/hotspot/cpu/riscv/riscv_v.ad index 3253653f676..268a51c327f 100644 --- a/src/hotspot/cpu/riscv/riscv_v.ad +++ b/src/hotspot/cpu/riscv/riscv_v.ad @@ -46,13 +46,13 @@ source %{ } } - const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { + bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { return match_rule_supported_vector(opcode, vlen, bt); } // Identify extra cases that we might want to provide match rules for vector nodes // and other intrinsics guarded with vector length (vlen) and element type (bt). - const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { + bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { if (!UseRVV) { return false; } @@ -79,14 +79,14 @@ source %{ return true; } - const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { + bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { if (!UseRVV) { return false; } return match_rule_supported_vector(opcode, vlen, bt); } - const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { + bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { return false; } diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.hpp b/src/hotspot/cpu/riscv/vm_version_riscv.hpp index 6d9ea5c2ac3..39c4150cea8 100644 --- a/src/hotspot/cpu/riscv/vm_version_riscv.hpp +++ b/src/hotspot/cpu/riscv/vm_version_riscv.hpp @@ -55,9 +55,9 @@ class VM_Version : public Abstract_VM_Version { _enabled = true; _value = value; } - const char* const pretty() { return _pretty; } - const uint64_t feature_bit() { return _feature_bit; } - const bool feature_string() { return _feature_string; } + const char* pretty() { return _pretty; } + uint64_t feature_bit() { return _feature_bit; } + bool feature_string() { return _feature_string; } bool enabled() { return _enabled; } int64_t value() { return _value; } virtual void update_flag() = 0; diff --git a/src/hotspot/cpu/s390/assembler_s390.hpp b/src/hotspot/cpu/s390/assembler_s390.hpp index 5762161662d..66243d6e0a9 100644 --- a/src/hotspot/cpu/s390/assembler_s390.hpp +++ b/src/hotspot/cpu/s390/assembler_s390.hpp @@ -349,7 +349,7 @@ class AddressLiteral { intptr_t value() const { return (intptr_t) _address; } - const relocInfo::relocType rtype() const { return 
_rspec.type(); } + relocInfo::relocType rtype() const { return _rspec.type(); } const RelocationHolder& rspec() const { return _rspec; } RelocationHolder rspec(int offset) const { diff --git a/src/hotspot/cpu/s390/matcher_s390.hpp b/src/hotspot/cpu/s390/matcher_s390.hpp index 0ab944a5426..e68dca34c3b 100644 --- a/src/hotspot/cpu/s390/matcher_s390.hpp +++ b/src/hotspot/cpu/s390/matcher_s390.hpp @@ -35,7 +35,7 @@ // Whether this platform implements the scalable vector feature static const bool implements_scalable_vector = false; - static constexpr const bool supports_scalable_vector() { + static constexpr bool supports_scalable_vector() { return false; } @@ -64,10 +64,10 @@ } // Suppress CMOVL. Conditional move available on z/Architecture only from z196 onwards. Not exploited yet. - static const int long_cmove_cost() { return ConditionalMoveLimit; } + static int long_cmove_cost() { return ConditionalMoveLimit; } // Suppress CMOVF. Conditional move available on z/Architecture only from z196 onwards. Not exploited yet. - static const int float_cmove_cost() { return ConditionalMoveLimit; } + static int float_cmove_cost() { return ConditionalMoveLimit; } // Set this as clone_shift_expressions. static bool narrow_oop_use_complex_address() { diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad index e1d3df97edf..6982349b3b5 100644 --- a/src/hotspot/cpu/s390/s390.ad +++ b/src/hotspot/cpu/s390/s390.ad @@ -1492,7 +1492,7 @@ static Register reg_to_register_object(int register_encoding) { return as_Register(register_encoding); } -const bool Matcher::match_rule_supported(int opcode) { +bool Matcher::match_rule_supported(int opcode) { if (!has_match_rule(opcode)) { return false; // no match rule present } @@ -1510,22 +1510,22 @@ const bool Matcher::match_rule_supported(int opcode) { return true; // Per default match rules are supported. } -const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { return match_rule_supported_vector(opcode, vlen, bt); } -const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) { return false; } return true; // Per default match rules are supported. } -const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { return false; } -const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { +bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { return false; } @@ -1538,7 +1538,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) } // Vector calling convention not yet implemented. -const bool Matcher::supports_vector_calling_convention(void) { +bool Matcher::supports_vector_calling_convention(void) { return false; } @@ -1550,32 +1550,32 @@ OptoRegPair Matcher::vector_return_value(uint ideal_reg) { //----------SUPERWORD HELPERS---------------------------------------- // Vector width in bytes. -const int Matcher::vector_width_in_bytes(BasicType bt) { +int Matcher::vector_width_in_bytes(BasicType bt) { assert(MaxVectorSize == 8, ""); return 8; } // Vector ideal reg. 
-const uint Matcher::vector_ideal_reg(int size) { +uint Matcher::vector_ideal_reg(int size) { assert(MaxVectorSize == 8 && size == 8, ""); return Op_RegL; } // Limits on vector size (number of elements) loaded into vector. -const int Matcher::max_vector_size(const BasicType bt) { +int Matcher::max_vector_size(const BasicType bt) { assert(is_java_primitive(bt), "only primitive type vectors"); return vector_width_in_bytes(bt)/type2aelembytes(bt); } -const int Matcher::min_vector_size(const BasicType bt) { +int Matcher::min_vector_size(const BasicType bt) { return max_vector_size(bt); // Same as max. } -const int Matcher::superword_max_vector_size(const BasicType bt) { +int Matcher::superword_max_vector_size(const BasicType bt) { return Matcher::max_vector_size(bt); } -const int Matcher::scalable_vector_reg_size(const BasicType bt) { +int Matcher::scalable_vector_reg_size(const BasicType bt) { return -1; } diff --git a/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp b/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp index 24d6b002b31..dfd9d59016f 100644 --- a/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp +++ b/src/hotspot/cpu/x86/gc/shared/barrierSetNMethod_x86.cpp @@ -169,7 +169,7 @@ void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) { // NativeNMethodCmpBarrier::verify() will immediately complain when it does // not find the expected native instruction at this offset, which needs updating. // Note that this offset is invariant of PreserveFramePointer. -static const int entry_barrier_offset(nmethod* nm) { +static int entry_barrier_offset(nmethod* nm) { #ifdef _LP64 if (nm->is_compiled_by_c2()) { return -14; diff --git a/src/hotspot/cpu/x86/matcher_x86.hpp b/src/hotspot/cpu/x86/matcher_x86.hpp index 82a39fe8b1d..bc249c0f33a 100644 --- a/src/hotspot/cpu/x86/matcher_x86.hpp +++ b/src/hotspot/cpu/x86/matcher_x86.hpp @@ -97,14 +97,14 @@ } // Prefer ConN+DecodeN over ConP. - static const bool const_oop_prefer_decode() { + static bool const_oop_prefer_decode() { NOT_LP64(ShouldNotCallThis();) // Prefer ConN+DecodeN over ConP. return true; } // Prefer ConP over ConNKlass+DecodeNKlass. - static const bool const_klass_prefer_decode() { + static bool const_klass_prefer_decode() { NOT_LP64(ShouldNotCallThis();) return false; } @@ -165,12 +165,12 @@ } // Does the CPU supports vector unsigned comparison instructions? 
- static const bool supports_vector_comparison_unsigned(int vlen, BasicType bt) { + static constexpr bool supports_vector_comparison_unsigned(int vlen, BasicType bt) { return true; } // Some microarchitectures have mask registers used on vectors - static const bool has_predicated_vectors(void) { + static bool has_predicated_vectors(void) { return VM_Version::supports_evex(); } diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad index 0e680275c49..3147857d249 100644 --- a/src/hotspot/cpu/x86/x86.ad +++ b/src/hotspot/cpu/x86/x86.ad @@ -1392,7 +1392,7 @@ Assembler::Width widthForType(BasicType bt) { static address vector_double_signflip() { return StubRoutines::x86::vector_double_sign_flip();} //============================================================================= -const bool Matcher::match_rule_supported(int opcode) { +bool Matcher::match_rule_supported(int opcode) { if (!has_match_rule(opcode)) { return false; // no match rule present } @@ -1694,13 +1694,13 @@ static inline bool is_pop_count_instr_target(BasicType bt) { (is_non_subword_integral_type(bt) && VM_Version::supports_avx512_vpopcntdq()); } -const bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_superword(int opcode, int vlen, BasicType bt) { return match_rule_supported_vector(opcode, vlen, bt); } // Identify extra cases that we might want to provide match rules for vector nodes and // other intrinsics guarded with vector length (vlen) and element type (bt). -const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) { const bool is_LP64 = LP64_ONLY(true) NOT_LP64(false); if (!match_rule_supported(opcode)) { return false; @@ -1988,7 +1988,7 @@ const bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType return true; // Per default match rules are supported. } -const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { +bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) { // ADLC based match_rule_supported routine checks for the existence of pattern based // on IR opcode. Most of the unary/binary/ternary masked operation share the IR nodes // of their non-masked counterpart with mask edge being the differentiator. @@ -2147,7 +2147,7 @@ const bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, Bas } } -const bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { +bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) { return false; } @@ -2219,7 +2219,7 @@ const TypeVectMask* Matcher::predicate_reg_type(const Type* elemTy, int length) } // Max vector size in bytes. 0 if not supported. -const int Matcher::vector_width_in_bytes(BasicType bt) { +int Matcher::vector_width_in_bytes(BasicType bt) { assert(is_java_primitive(bt), "only primitive type vectors"); if (UseSSE < 2) return 0; // SSE2 supports 128bit vectors for all types. @@ -2262,10 +2262,10 @@ const int Matcher::vector_width_in_bytes(BasicType bt) { } // Limits on vector size (number of elements) loaded into vector. 
-const int Matcher::max_vector_size(const BasicType bt) { +int Matcher::max_vector_size(const BasicType bt) { return vector_width_in_bytes(bt)/type2aelembytes(bt); } -const int Matcher::min_vector_size(const BasicType bt) { +int Matcher::min_vector_size(const BasicType bt) { int max_size = max_vector_size(bt); // Min size which can be loaded into vector is 4 bytes. int size = (type2aelembytes(bt) == 1) ? 4 : 2; @@ -2276,7 +2276,7 @@ const int Matcher::min_vector_size(const BasicType bt) { return MIN2(size,max_size); } -const int Matcher::superword_max_vector_size(const BasicType bt) { +int Matcher::superword_max_vector_size(const BasicType bt) { // Limit the max vector size for auto vectorization to 256 bits (32 bytes) // by default on Cascade Lake if (VM_Version::is_default_intel_cascade_lake()) { @@ -2285,12 +2285,12 @@ const int Matcher::superword_max_vector_size(const BasicType bt) { return Matcher::max_vector_size(bt); } -const int Matcher::scalable_vector_reg_size(const BasicType bt) { +int Matcher::scalable_vector_reg_size(const BasicType bt) { return -1; } // Vector ideal reg corresponding to specified size in bytes -const uint Matcher::vector_ideal_reg(int size) { +uint Matcher::vector_ideal_reg(int size) { assert(MaxVectorSize >= size, ""); switch(size) { case 4: return Op_VecS; diff --git a/src/hotspot/cpu/x86/x86_32.ad b/src/hotspot/cpu/x86/x86_32.ad index baa81934012..d2d3c6b6fa1 100644 --- a/src/hotspot/cpu/x86/x86_32.ad +++ b/src/hotspot/cpu/x86/x86_32.ad @@ -1407,7 +1407,7 @@ uint MachUEPNode::size(PhaseRegAlloc *ra_) const { //============================================================================= // Vector calling convention not supported. -const bool Matcher::supports_vector_calling_convention() { +bool Matcher::supports_vector_calling_convention() { return false; } diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad index abc43081afb..16733dbe1d1 100644 --- a/src/hotspot/cpu/x86/x86_64.ad +++ b/src/hotspot/cpu/x86/x86_64.ad @@ -1718,7 +1718,7 @@ uint MachUEPNode::size(PhaseRegAlloc* ra_) const //============================================================================= -const bool Matcher::supports_vector_calling_convention(void) { +bool Matcher::supports_vector_calling_convention(void) { if (EnableVectorSupport && UseVectorStubs) { return true; } diff --git a/src/hotspot/share/adlc/output_c.cpp b/src/hotspot/share/adlc/output_c.cpp index b0c4e7eff6b..5276987eec4 100644 --- a/src/hotspot/share/adlc/output_c.cpp +++ b/src/hotspot/share/adlc/output_c.cpp @@ -906,14 +906,14 @@ void ArchDesc::build_pipe_classes(FILE *fp_cpp) { pipeclass->forceSerialization() ? "true" : "false", pipeclass->mayHaveNoCode() ? 
"true" : "false" ); if (paramcount > 0) { - fprintf(fp_cpp, "\n (enum machPipelineStages * const) pipeline_reads_%03d,\n ", + fprintf(fp_cpp, "\n (enum machPipelineStages *) pipeline_reads_%03d,\n ", pipeline_reads_index+1); } else fprintf(fp_cpp, " nullptr,"); - fprintf(fp_cpp, " (enum machPipelineStages * const) pipeline_res_stages_%03d,\n", + fprintf(fp_cpp, " (enum machPipelineStages *) pipeline_res_stages_%03d,\n", pipeline_res_stages_index+1); - fprintf(fp_cpp, " (uint * const) pipeline_res_cycles_%03d,\n", + fprintf(fp_cpp, " (uint *) pipeline_res_cycles_%03d,\n", pipeline_res_cycles_index+1); fprintf(fp_cpp, " Pipeline_Use(%s, (Pipeline_Use_Element *)", pipeline_res_args.name(pipeline_res_mask_index)); @@ -4209,7 +4209,7 @@ void ArchDesc::buildMachNodeGenerator(FILE *fp_cpp) { // instruction has a matching rule for the host architecture. void ArchDesc::buildInstructMatchCheck(FILE *fp_cpp) const { fprintf(fp_cpp, "\n\n"); - fprintf(fp_cpp, "const bool Matcher::has_match_rule(int opcode) {\n"); + fprintf(fp_cpp, "bool Matcher::has_match_rule(int opcode) {\n"); fprintf(fp_cpp, " assert(_last_machine_leaf < opcode && opcode < _last_opcode, \"opcode in range\");\n"); fprintf(fp_cpp, " return _hasMatchRule[opcode];\n"); fprintf(fp_cpp, "}\n\n"); diff --git a/src/hotspot/share/c1/c1_ValueType.hpp b/src/hotspot/share/c1/c1_ValueType.hpp index a5553d3599d..488b3372518 100644 --- a/src/hotspot/share/c1/c1_ValueType.hpp +++ b/src/hotspot/share/c1/c1_ValueType.hpp @@ -114,7 +114,7 @@ class ValueType: public CompilationResourceObj { assert(_size > -1, "shouldn't be asking for size"); return _size; } - virtual const char tchar() const = 0; // the type 'character' for printing + virtual char tchar() const = 0; // the type 'character' for printing virtual const char* name() const = 0; // the type name for printing virtual bool is_constant() const { return false; } @@ -177,7 +177,7 @@ class VoidType: public ValueType { public: VoidType(): ValueType(voidTag, 0) {} virtual ValueType* base() const { return voidType; } - virtual const char tchar() const { return 'v'; } + virtual char tchar() const { return 'v'; } virtual const char* name() const { return "void"; } virtual VoidType* as_VoidType() { return this; } }; @@ -187,7 +187,7 @@ class IntType: public ValueType { public: IntType(): ValueType(intTag, 1) {} virtual ValueType* base() const { return intType; } - virtual const char tchar() const { return 'i'; } + virtual char tchar() const { return 'i'; } virtual const char* name() const { return "int"; } virtual IntType* as_IntType() { return this; } }; @@ -211,7 +211,7 @@ class LongType: public ValueType { public: LongType(): ValueType(longTag, 2) {} virtual ValueType* base() const { return longType; } - virtual const char tchar() const { return 'l'; } + virtual char tchar() const { return 'l'; } virtual const char* name() const { return "long"; } virtual LongType* as_LongType() { return this; } }; @@ -235,7 +235,7 @@ class FloatType: public ValueType { public: FloatType(): ValueType(floatTag, 1) {} virtual ValueType* base() const { return floatType; } - virtual const char tchar() const { return 'f'; } + virtual char tchar() const { return 'f'; } virtual const char* name() const { return "float"; } virtual FloatType* as_FloatType() { return this; } }; @@ -259,7 +259,7 @@ class DoubleType: public ValueType { public: DoubleType(): ValueType(doubleTag, 2) {} virtual ValueType* base() const { return doubleType; } - virtual const char tchar() const { return 'd'; } + virtual char tchar() const { return 
'd'; } virtual const char* name() const { return "double"; } virtual DoubleType* as_DoubleType() { return this; } }; @@ -283,7 +283,7 @@ class ObjectType: public ValueType { public: ObjectType(): ValueType(objectTag, 1) {} virtual ValueType* base() const { return objectType; } - virtual const char tchar() const { return 'a'; } + virtual char tchar() const { return 'a'; } virtual const char* name() const { return "object"; } virtual ObjectType* as_ObjectType() { return this; } virtual ciObject* constant_value() const { ShouldNotReachHere(); return nullptr; } @@ -371,7 +371,7 @@ class MetadataType: public ValueType { public: MetadataType(): ValueType(metaDataTag, 1) {} virtual ValueType* base() const { return objectType; } - virtual const char tchar() const { return 'a'; } + virtual char tchar() const { return 'a'; } virtual const char* name() const { return "object"; } virtual MetadataType* as_MetadataType() { return this; } bool is_loaded() const; @@ -428,7 +428,7 @@ class AddressType: public ValueType { public: AddressType(): ValueType(addressTag, 1) {} virtual ValueType* base() const { return addressType; } - virtual const char tchar() const { return 'r'; } + virtual char tchar() const { return 'r'; } virtual const char* name() const { return "address"; } virtual AddressType* as_AddressType() { return this; } }; @@ -453,7 +453,7 @@ class IllegalType: public ValueType { public: IllegalType(): ValueType(illegalTag, -1) {} virtual ValueType* base() const { return illegalType; } - virtual const char tchar() const { return ' '; } + virtual char tchar() const { return ' '; } virtual const char* name() const { return "illegal"; } virtual IllegalType* as_IllegalType() { return this; } }; diff --git a/src/hotspot/share/classfile/classFileParser.cpp b/src/hotspot/share/classfile/classFileParser.cpp index e4d6df4b21f..6cacc785fa1 100644 --- a/src/hotspot/share/classfile/classFileParser.cpp +++ b/src/hotspot/share/classfile/classFileParser.cpp @@ -4961,7 +4961,7 @@ void ClassFileParser::verify_legal_field_signature(const Symbol* name, TRAPS) const { if (!_need_verify) { return; } - const char* const bytes = (const char* const)signature->bytes(); + const char* const bytes = (const char*)signature->bytes(); const unsigned int length = signature->utf8_length(); const char* const p = skip_over_field_signature(bytes, false, length, CHECK); @@ -5672,7 +5672,7 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream, parse_constant_pool(stream, cp, _orig_cp_size, CHECK); - assert(cp_size == (const u2)cp->length(), "invariant"); + assert(cp_size == (u2)cp->length(), "invariant"); // ACCESS FLAGS stream->guarantee_more(8, CHECK); // flags, this_class, super_class, infs_len diff --git a/src/hotspot/share/classfile/classFileStream.cpp b/src/hotspot/share/classfile/classFileStream.cpp index ed530353216..ed67481396d 100644 --- a/src/hotspot/share/classfile/classFileStream.cpp +++ b/src/hotspot/share/classfile/classFileStream.cpp @@ -54,7 +54,7 @@ const u1* ClassFileStream::clone_buffer() const { return new_buffer_start; } -const char* const ClassFileStream::clone_source() const { +const char* ClassFileStream::clone_source() const { const char* const src = source(); char* source_copy = nullptr; if (src != nullptr) { diff --git a/src/hotspot/share/classfile/classFileStream.hpp b/src/hotspot/share/classfile/classFileStream.hpp index b84956983b9..41a7df68bd5 100644 --- a/src/hotspot/share/classfile/classFileStream.hpp +++ b/src/hotspot/share/classfile/classFileStream.hpp @@ -49,7 +49,7 @@ class 
ClassFileStream: public ResourceObj { protected: const u1* clone_buffer() const; - const char* const clone_source() const; + const char* clone_source() const; public: static const bool verify; diff --git a/src/hotspot/share/compiler/compilerOracle.cpp b/src/hotspot/share/compiler/compilerOracle.cpp index a3d5870f9cc..031e9b25b7a 100644 --- a/src/hotspot/share/compiler/compilerOracle.cpp +++ b/src/hotspot/share/compiler/compilerOracle.cpp @@ -204,7 +204,7 @@ template<> void TypedMethodOptionMatcher::set_value(bool value) { } template<> void TypedMethodOptionMatcher::set_value(ccstr value) { - _u.ccstr_value = (const ccstr)os::strdup_check_oom(value); + _u.ccstr_value = (ccstr)os::strdup_check_oom(value); } void TypedMethodOptionMatcher::print() { diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index 9fa69eead3d..96bc7736467 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -1357,8 +1357,8 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask { _humongous_regions_removed(0) { } size_t freed_bytes() { return _freed_bytes; } - const uint old_regions_removed() { return _old_regions_removed; } - const uint humongous_regions_removed() { return _humongous_regions_removed; } + uint old_regions_removed() { return _old_regions_removed; } + uint humongous_regions_removed() { return _humongous_regions_removed; } bool do_heap_region(HeapRegion *hr) { if (hr->used() > 0 && hr->live_bytes() == 0 && !hr->is_young()) { diff --git a/src/hotspot/share/gc/g1/g1HRPrinter.hpp b/src/hotspot/share/gc/g1/g1HRPrinter.hpp index 9a522a4029e..876be95f074 100644 --- a/src/hotspot/share/gc/g1/g1HRPrinter.hpp +++ b/src/hotspot/share/gc/g1/g1HRPrinter.hpp @@ -46,7 +46,7 @@ public: // In some places we iterate over a list in order to generate output // for the list's elements. By exposing this we can avoid this // iteration if the printer is not active. - const bool is_active() { return log_is_enabled(Trace, gc, region); } + bool is_active() { return log_is_enabled(Trace, gc, region); } // The methods below are convenient wrappers for the print() method. 
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.hpp b/src/hotspot/share/gc/parallel/psParallelCompact.hpp index 07c420bc0dd..d9644d3ddba 100644 --- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp +++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp @@ -1246,7 +1246,7 @@ class PSParallelCompact : AllStatic { static ParallelCompactData& summary_data() { return _summary_data; } // Reference Processing - static ReferenceProcessor* const ref_processor() { return _ref_processor; } + static ReferenceProcessor* ref_processor() { return _ref_processor; } static STWGCTimer* gc_timer() { return &_gc_timer; } diff --git a/src/hotspot/share/gc/parallel/psScavenge.hpp b/src/hotspot/share/gc/parallel/psScavenge.hpp index 02e20fd2f08..69fa18aa0be 100644 --- a/src/hotspot/share/gc/parallel/psScavenge.hpp +++ b/src/hotspot/share/gc/parallel/psScavenge.hpp @@ -77,7 +77,7 @@ class PSScavenge: AllStatic { static bool should_attempt_scavenge(); // Private accessors - static PSCardTable* const card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; } + static PSCardTable* card_table() { assert(_card_table != nullptr, "Sanity"); return _card_table; } static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; } public: @@ -92,7 +92,7 @@ class PSScavenge: AllStatic { _span_based_discoverer.set_span(mr); } // Used by scavenge_contents - static ReferenceProcessor* const reference_processor() { + static ReferenceProcessor* reference_processor() { assert(_ref_processor != nullptr, "Sanity"); return _ref_processor; } diff --git a/src/hotspot/share/gc/serial/defNewGeneration.hpp b/src/hotspot/share/gc/serial/defNewGeneration.hpp index 03ee369fcd1..0313d408224 100644 --- a/src/hotspot/share/gc/serial/defNewGeneration.hpp +++ b/src/hotspot/share/gc/serial/defNewGeneration.hpp @@ -167,7 +167,7 @@ class DefNewGeneration: public Generation { // allocate and initialize ("weak") refs processing support void ref_processor_init(); - ReferenceProcessor* const ref_processor() { return _ref_processor; } + ReferenceProcessor* ref_processor() { return _ref_processor; } // Accessing spaces ContiguousSpace* eden() const { return _eden_space; } diff --git a/src/hotspot/share/gc/serial/markSweep.hpp b/src/hotspot/share/gc/serial/markSweep.hpp index 65ed1c52e39..d1020cc733c 100644 --- a/src/hotspot/share/gc/serial/markSweep.hpp +++ b/src/hotspot/share/gc/serial/markSweep.hpp @@ -132,7 +132,7 @@ class MarkSweep : AllStatic { static uint total_invocations() { return _total_invocations; } // Reference Processing - static ReferenceProcessor* const ref_processor() { return _ref_processor; } + static ReferenceProcessor* ref_processor() { return _ref_processor; } static STWGCTimer* gc_timer() { return _gc_timer; } static SerialOldTracer* gc_tracer() { return _gc_tracer; } diff --git a/src/hotspot/share/gc/shared/gcHeapSummary.hpp b/src/hotspot/share/gc/shared/gcHeapSummary.hpp index 588dd3f6631..fb048d06a41 100644 --- a/src/hotspot/share/gc/shared/gcHeapSummary.hpp +++ b/src/hotspot/share/gc/shared/gcHeapSummary.hpp @@ -122,11 +122,11 @@ class G1HeapSummary : public GCHeapSummary { public: G1HeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, size_t edenUsed, size_t edenCapacity, size_t survivorUsed, size_t oldGenUsed, uint numberOfRegions) : GCHeapSummary(heap_space, heap_used), _edenUsed(edenUsed), _edenCapacity(edenCapacity), _survivorUsed(survivorUsed), _oldGenUsed(oldGenUsed), _numberOfRegions(numberOfRegions) { } - const size_t edenUsed() const { return _edenUsed; } - const 
size_t edenCapacity() const { return _edenCapacity; }
- const size_t survivorUsed() const { return _survivorUsed; }
- const size_t oldGenUsed() const { return _oldGenUsed; }
- const uint numberOfRegions() const { return _numberOfRegions; }
+ size_t edenUsed() const { return _edenUsed; }
+ size_t edenCapacity() const { return _edenCapacity; }
+ size_t survivorUsed() const { return _survivorUsed; }
+ size_t oldGenUsed() const { return _oldGenUsed; }
+ uint numberOfRegions() const { return _numberOfRegions; }
 virtual void accept(GCHeapSummaryVisitor* visitor) const {
 visitor->visit(this);
diff --git a/src/hotspot/share/gc/shared/plab.hpp b/src/hotspot/share/gc/shared/plab.hpp
index cb1a44ec167..2eebdeeadb4 100644
--- a/src/hotspot/share/gc/shared/plab.hpp
+++ b/src/hotspot/share/gc/shared/plab.hpp
@@ -178,11 +178,11 @@ public:
 size_t used() const { return allocated() - (wasted() + unused()); }
 size_t undo_wasted() const { return _undo_wasted; }
- static const size_t min_size() {
+ static size_t min_size() {
 return PLAB::min_size();
 }
- static const size_t max_size() {
+ static size_t max_size() {
 return PLAB::max_size();
 }
diff --git a/src/hotspot/share/gc/shared/workerThread.hpp b/src/hotspot/share/gc/shared/workerThread.hpp
index d3b246c0930..39687e2581c 100644
--- a/src/hotspot/share/gc/shared/workerThread.hpp
+++ b/src/hotspot/share/gc/shared/workerThread.hpp
@@ -48,7 +48,7 @@ private:
 _gc_id(GCId::current_or_undefined()) {}
 const char* name() const { return _name; }
- const uint gc_id() const { return _gc_id; }
+ uint gc_id() const { return _gc_id; }
 virtual void work(uint worker_id) = 0;
 };
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
index ac1804237d0..63f5082cafd 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp
@@ -236,10 +236,10 @@ public:
 inline size_t num_regions() const { return _num_regions; }
 inline bool is_heap_region_special() { return _heap_region_special; }
- inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
+ inline ShenandoahHeapRegion* heap_region_containing(const void* addr) const;
 inline size_t heap_region_index_containing(const void* addr) const;
- inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;
+ inline ShenandoahHeapRegion* get_region(size_t region_idx) const;
 void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
 void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
index 4158f4bee22..efdc89ac648 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp
@@ -80,7 +80,7 @@ inline size_t ShenandoahHeap::heap_region_index_containing(const void* addr) con
 return index;
 }
-inline ShenandoahHeapRegion* const ShenandoahHeap::heap_region_containing(const void* addr) const {
+inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void* addr) const {
 size_t index = heap_region_index_containing(addr);
 ShenandoahHeapRegion* const result = get_region(index);
 assert(addr >= result->bottom() && addr < result->end(), "Heap region contains the address: " PTR_FORMAT, p2i(addr));
@@ -548,7 +548,7 @@ inline void ShenandoahHeap::marked_object_oop_iterate(ShenandoahHeapRegion* regi
 }
 }
-inline ShenandoahHeapRegion* const ShenandoahHeap::get_region(size_t region_idx) const {
+inline ShenandoahHeapRegion* ShenandoahHeap::get_region(size_t region_idx) const {
 if (region_idx < _num_regions) {
 return _regions[region_idx];
 } else {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
index 755e2cc1c9a..d2487705d5a 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp
@@ -163,7 +163,7 @@ private:
 void report_illegal_transition(const char* method);
 public:
- static const int region_states_num() {
+ static int region_states_num() {
 return _REGION_STATES_NUM;
 }
diff --git a/src/hotspot/share/gc/x/xRelocate.cpp b/src/hotspot/share/gc/x/xRelocate.cpp
index c7b5287b474..645989eaba3 100644
--- a/src/hotspot/share/gc/x/xRelocate.cpp
+++ b/src/hotspot/share/gc/x/xRelocate.cpp
@@ -189,7 +189,7 @@ public:
 page->undo_alloc_object(addr, size);
 }
- const size_t in_place_count() const {
+ size_t in_place_count() const {
 return _in_place_count;
 }
 };
@@ -266,7 +266,7 @@ public:
 page->undo_alloc_object_atomic(addr, size);
 }
- const size_t in_place_count() const {
+ size_t in_place_count() const {
 return _in_place_count;
 }
 };
diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp
index d4e3e92b3aa..60a6616d0a7 100644
--- a/src/hotspot/share/gc/z/zRelocate.cpp
+++ b/src/hotspot/share/gc/z/zRelocate.cpp
@@ -473,7 +473,7 @@ public:
 page->undo_alloc_object(addr, size);
 }
- const size_t in_place_count() const {
+ size_t in_place_count() const {
 return _in_place_count;
 }
 };
@@ -567,7 +567,7 @@ public:
 page->undo_alloc_object_atomic(addr, size);
 }
- const size_t in_place_count() const {
+ size_t in_place_count() const {
 return _in_place_count;
 }
 };
diff --git a/src/hotspot/share/jfr/jni/jfrUpcalls.cpp b/src/hotspot/share/jfr/jni/jfrUpcalls.cpp
index eaf11380895..bb316700ac0 100644
--- a/src/hotspot/share/jfr/jni/jfrUpcalls.cpp
+++ b/src/hotspot/share/jfr/jni/jfrUpcalls.cpp
@@ -65,16 +65,16 @@ static bool initialize(TRAPS) {
 return initialized;
 }
-static const typeArrayOop invoke(jlong trace_id,
- jboolean force_instrumentation,
- jboolean boot_class_loader,
- jclass class_being_redefined,
- jint class_data_len,
- const unsigned char* class_data,
- Symbol* method_sym,
- Symbol* signature_sym,
- jint& new_bytes_length,
- TRAPS) {
+static typeArrayOop invoke(jlong trace_id,
+ jboolean force_instrumentation,
+ jboolean boot_class_loader,
+ jclass class_being_redefined,
+ jint class_data_len,
+ const unsigned char* class_data,
+ Symbol* method_sym,
+ Symbol* signature_sym,
+ jint& new_bytes_length,
+ TRAPS) {
 DEBUG_ONLY(JfrJavaSupport::check_java_thread_in_vm(THREAD));
 const Klass* klass = SystemDictionary::resolve_or_fail(jvm_upcalls_class_sym, true, CHECK_NULL);
 assert(klass != nullptr, "invariant");
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edge.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edge.cpp
index 54c9ef3cb6b..d66c1c0f870 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/edge.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edge.cpp
@@ -29,11 +29,11 @@
 Edge::Edge(const Edge* parent, UnifiedOopRef reference) : _parent(parent), _reference(reference) {}
-const oop Edge::pointee() const {
+oop Edge::pointee() const {
 return _reference.dereference();
 }
-const oop Edge::reference_owner() const {
+oop Edge::reference_owner() const {
 return is_root() ? (oop)nullptr : _parent->pointee();
 }
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp b/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp
index 47aef3d3fee..fecf5ae84ed 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edge.hpp
@@ -45,8 +45,8 @@ class Edge {
 bool is_root() const {
 return _parent == nullptr;
 }
- const oop pointee() const;
- const oop reference_owner() const;
+ oop pointee() const;
+ oop reference_owner() const;
 size_t distance_to_root() const;
 void* operator new (size_t sz, void* here) {
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp
index 69c88cba8ec..edb87c7fc8e 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/edgeUtils.cpp
@@ -97,7 +97,7 @@ static int array_offset(const Edge& edge) {
 UnifiedOopRef reference = edge.reference();
 assert(!reference.is_null(), "invariant");
 assert(ref_owner->is_array(), "invariant");
- const objArrayOop ref_owner_array = static_cast<const objArrayOop>(ref_owner);
+ const objArrayOop ref_owner_array = static_cast<objArrayOop>(ref_owner);
 const int offset = (int)pointer_delta(reference.addr(), ref_owner_array->base(), heapOopSize);
 assert(offset >= 0 && offset < ref_owner_array->length(), "invariant");
 return offset;
diff --git a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp
index 7c5c88ef6ca..2c2d7fe7b3d 100644
--- a/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/checkpoint/objectSampleWriter.cpp
@@ -408,11 +408,11 @@ class RootResolutionSet : public ResourceObj, public RootCallback {
 private:
 GrowableArray* _unresolved_roots;
- const uintptr_t high() const {
+ uintptr_t high() const {
 return _unresolved_roots->last()->_data._root_edge->reference().addr();
 }
- const uintptr_t low() const {
+ uintptr_t low() const {
 return _unresolved_roots->first()->_data._root_edge->reference().addr();
 }
diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp
index 492b7fd4131..a732191af3d 100644
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.cpp
@@ -35,7 +35,7 @@ void ObjectSample::reset() {
 _virtual_thread = false;
 }
-const oop ObjectSample::object() const {
+oop ObjectSample::object() const {
 return _object.resolve();
 }
diff --git a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp
index 734aaea3255..b11e9c78728 100644
--- a/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp
+++ b/src/hotspot/share/jfr/leakprofiler/sampling/objectSample.hpp
@@ -102,7 +102,7 @@ class ObjectSample : public JfrCHeapObj {
 bool is_dead() const;
- const oop object() const;
+ oop object() const;
 void set_object(oop object);
 const oop* object_addr() const;
@@ -141,7 +141,7 @@ class ObjectSample : public JfrCHeapObj {
 return _allocation_time;
 }
- const void set_allocation_time(const JfrTicks& time) {
+ void set_allocation_time(const JfrTicks& time) {
 _allocation_time = Ticks(time.value());
 }
diff --git a/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp b/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp
index 643cc0c5a3c..d9b628593a4 100644
--- a/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp
+++ b/src/hotspot/share/jfr/periodic/jfrModuleEvent.cpp
@@ -62,7 +62,7 @@ static void write_module_dependency_event(const void* from_module, const ModuleE
 EventModuleRequire event(UNTIMED);
 event.set_starttime(invocation_time);
 event.set_endtime(invocation_time);
- event.set_source((const ModuleEntry* const)from_module);
+ event.set_source((const ModuleEntry*)from_module);
 event.set_requiredModule(to_module);
 event.commit();
 }
diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp
index 615a092d778..9a87c316671 100644
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrThreadGroup.cpp
@@ -53,7 +53,7 @@ class JfrThreadGroupPointers : public ResourceObj {
 JfrThreadGroupPointers(Handle thread_group_handle, jweak thread_group_weak_ref);
 Handle thread_group_handle() const;
 jweak thread_group_weak_ref() const;
- oopDesc* const thread_group_oop() const;
+ oopDesc* thread_group_oop() const;
 jweak transfer_weak_global_handle_ownership();
 void clear_weak_ref();
 };
@@ -70,7 +70,7 @@ jweak JfrThreadGroupPointers::thread_group_weak_ref() const {
 return _thread_group_weak_ref;
 }
-oopDesc* const JfrThreadGroupPointers::thread_group_oop() const {
+oopDesc* JfrThreadGroupPointers::thread_group_oop() const {
 assert(_thread_group_weak_ref == nullptr || JNIHandles::resolve_non_null(_thread_group_weak_ref) == _thread_group_handle(), "invariant");
 return _thread_group_handle();
@@ -209,7 +209,7 @@ class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj {
 traceid thread_group_id() const { return _thread_group_id; }
 void set_thread_group_id(traceid tgid) { _thread_group_id = tgid; }
- const char* const thread_group_name() const { return _thread_group_name; }
+ const char* thread_group_name() const { return _thread_group_name; }
 void set_thread_group_name(const char* tgname);
 traceid parent_group_id() const { return _parent_group_id; }
@@ -217,7 +217,7 @@ class JfrThreadGroup::JfrThreadGroupEntry : public JfrCHeapObj {
 void set_thread_group(JfrThreadGroupPointers& ptrs);
 bool is_equal(const JfrThreadGroupPointers& ptrs) const;
- const oop thread_group() const;
+ oop thread_group() const;
 };
 JfrThreadGroup::JfrThreadGroupEntry::JfrThreadGroupEntry(const char* tgname, JfrThreadGroupPointers& ptrs) :
@@ -248,7 +248,7 @@ void JfrThreadGroup::JfrThreadGroupEntry::set_thread_group_name(const char* tgna
 }
 }
-const oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const {
+oop JfrThreadGroup::JfrThreadGroupEntry::thread_group() const {
 return _thread_group_weak_ref != nullptr ? JNIHandles::resolve(_thread_group_weak_ref) : _thread_group_oop;
 }
diff --git a/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp b/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp
index 5388349c3f6..cd476302289 100644
--- a/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp
+++ b/src/hotspot/share/jfr/support/jfrJdkJfrEvent.cpp
@@ -150,7 +150,7 @@ jobject JdkJfrEvent::get_all_klasses(TRAPS) {
 JavaValue result(T_BOOLEAN);
 for (int i = 0; i < event_subklasses.length(); ++i) {
- const jclass clazz = (const jclass)event_subklasses.at(i);
+ const jclass clazz = (jclass)event_subklasses.at(i);
 assert(JdkJfrEvent::is_subklass(clazz), "invariant");
 JfrJavaArguments args(&result, array_list_klass, add_method_sym, add_method_sig_sym);
 args.set_receiver(h_array_list());
diff --git a/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp b/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp
index badb05394cd..5995edd01c5 100644
--- a/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp
+++ b/src/hotspot/share/jfr/support/jfrTraceIdExtension.hpp
@@ -32,7 +32,7 @@
 #define DEFINE_TRACE_ID_METHODS \
 traceid trace_id() const { return _trace_id; } \
- traceid* const trace_id_addr() const { return &_trace_id; } \
+ traceid* trace_id_addr() const { return &_trace_id; } \
 void set_trace_id(traceid id) const { _trace_id = id; }
 #define DEFINE_TRACE_ID_SIZE \
diff --git a/src/hotspot/share/oops/instanceKlass.hpp b/src/hotspot/share/oops/instanceKlass.hpp
index 060073ad7a9..e6165000cb0 100644
--- a/src/hotspot/share/oops/instanceKlass.hpp
+++ b/src/hotspot/share/oops/instanceKlass.hpp
@@ -116,7 +116,7 @@ class OopMapBlock {
 }
 // sizeof(OopMapBlock) in words.
- static const int size_in_words() {
+ static int size_in_words() {
 return align_up((int)sizeof(OopMapBlock), wordSize) >> LogBytesPerWord;
 }
diff --git a/src/hotspot/share/oops/method.hpp b/src/hotspot/share/oops/method.hpp
index 06f0b8cf7d7..29e09ba72b0 100644
--- a/src/hotspot/share/oops/method.hpp
+++ b/src/hotspot/share/oops/method.hpp
@@ -410,7 +410,7 @@ class Method : public Metadata {
 // nmethod/verified compiler entry
 address verified_code_entry();
 bool check_code() const; // Not inline to avoid circular ref
- CompiledMethod* volatile code() const;
+ CompiledMethod* code() const;
 // Locks CompiledMethod_lock if not held.
 void unlink_code(CompiledMethod *compare);
diff --git a/src/hotspot/share/oops/method.inline.hpp b/src/hotspot/share/oops/method.inline.hpp
index ae8e324c01a..2c7b4c21aa7 100644
--- a/src/hotspot/share/oops/method.inline.hpp
+++ b/src/hotspot/share/oops/method.inline.hpp
@@ -45,7 +45,7 @@ inline void Method::set_method_data(MethodData* data) {
 Atomic::release_store(&_method_data, data);
 }
-inline CompiledMethod* volatile Method::code() const {
+inline CompiledMethod* Method::code() const {
 assert( check_code(), "" );
 return Atomic::load_acquire(&_code);
 }
diff --git a/src/hotspot/share/opto/callnode.hpp b/src/hotspot/share/opto/callnode.hpp
index e74143f3644..f212f87bf51 100644
--- a/src/hotspot/share/opto/callnode.hpp
+++ b/src/hotspot/share/opto/callnode.hpp
@@ -602,8 +602,8 @@ public:
 }
 const TypeFunc* tf() const { return _tf; }
- const address entry_point() const { return _entry_point; }
- const float cnt() const { return _cnt; }
+ address entry_point() const { return _entry_point; }
+ float cnt() const { return _cnt; }
 CallGenerator* generator() const { return _generator; }
 void set_tf(const TypeFunc* tf) { _tf = tf; }
diff --git a/src/hotspot/share/opto/castnode.hpp b/src/hotspot/share/opto/castnode.hpp
index d4b2d616216..eb9bcaa54b1 100644
--- a/src/hotspot/share/opto/castnode.hpp
+++ b/src/hotspot/share/opto/castnode.hpp
@@ -117,7 +117,7 @@ class CastIINode: public ConstraintCastNode {
 virtual Node* Identity(PhaseGVN* phase);
 virtual const Type* Value(PhaseGVN* phase) const;
 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
- const bool has_range_check() {
+ bool has_range_check() {
 #ifdef _LP64
 return _range_check_dependency;
 #else
diff --git a/src/hotspot/share/opto/cfgnode.hpp b/src/hotspot/share/opto/cfgnode.hpp
index 48f484a3468..a9585b0106d 100644
--- a/src/hotspot/share/opto/cfgnode.hpp
+++ b/src/hotspot/share/opto/cfgnode.hpp
@@ -239,10 +239,10 @@ public:
 virtual const TypePtr *adr_type() const { verify_adr_type(true); return _adr_type; }
 void set_inst_mem_id(int inst_mem_id) { _inst_mem_id = inst_mem_id; }
- const int inst_mem_id() const { return _inst_mem_id; }
- const int inst_id() const { return _inst_id; }
- const int inst_index() const { return _inst_index; }
- const int inst_offset() const { return _inst_offset; }
+ int inst_mem_id() const { return _inst_mem_id; }
+ int inst_id() const { return _inst_id; }
+ int inst_index() const { return _inst_index; }
+ int inst_offset() const { return _inst_offset; }
 bool is_same_inst_field(const Type* tp, int mem_id, int id, int index, int offset) {
 return type()->basic_type() == tp->basic_type() && inst_mem_id() == mem_id &&
diff --git a/src/hotspot/share/opto/machnode.hpp b/src/hotspot/share/opto/machnode.hpp
index a455e420d5d..fb9f2ed5143 100644
--- a/src/hotspot/share/opto/machnode.hpp
+++ b/src/hotspot/share/opto/machnode.hpp
@@ -898,9 +898,9 @@ public:
 float _cnt; // Estimate of number of times called
 bool _guaranteed_safepoint; // Do we need to observe safepoint?
- const TypeFunc* tf() const { return _tf; }
- const address entry_point() const { return _entry_point; }
- const float cnt() const { return _cnt; }
+ const TypeFunc* tf() const { return _tf; }
+ address entry_point() const { return _entry_point; }
+ float cnt() const { return _cnt; }
 void set_tf(const TypeFunc* tf) { _tf = tf; }
 void set_entry_point(address p) { _entry_point = p; }
diff --git a/src/hotspot/share/opto/matcher.cpp b/src/hotspot/share/opto/matcher.cpp
index 744d58f8d4f..e8c62146cd2 100644
--- a/src/hotspot/share/opto/matcher.cpp
+++ b/src/hotspot/share/opto/matcher.cpp
@@ -447,7 +447,7 @@ static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
 return rms;
 }
-const int Matcher::scalable_predicate_reg_slots() {
+int Matcher::scalable_predicate_reg_slots() {
 assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(), "scalable predicate vector should be supported");
 int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;
diff --git a/src/hotspot/share/opto/matcher.hpp b/src/hotspot/share/opto/matcher.hpp
index 3198ac17d25..d7cff3d3f43 100644
--- a/src/hotspot/share/opto/matcher.hpp
+++ b/src/hotspot/share/opto/matcher.hpp
@@ -319,50 +319,50 @@ public:
 RegMask *_calling_convention_mask; // Array of RegMasks per argument
 // Does matcher have a match rule for this ideal node?
- static const bool has_match_rule(int opcode);
+ static bool has_match_rule(int opcode);
 static const bool _hasMatchRule[_last_opcode];
 // Does matcher have a match rule for this ideal node and is the
 // predicate (if there is one) true?
 // NOTE: If this function is used more commonly in the future, ADLC
 // should generate this one.
- static const bool match_rule_supported(int opcode);
+ static bool match_rule_supported(int opcode);
 // Identify extra cases that we might want to vectorize automatically
 // And exclude cases which are not profitable to auto-vectorize.
- static const bool match_rule_supported_superword(int opcode, int vlen, BasicType bt);
+ static bool match_rule_supported_superword(int opcode, int vlen, BasicType bt);
 // identify extra cases that we might want to provide match rules for
 // e.g. Op_ vector nodes and other intrinsics while guarding with vlen
- static const bool match_rule_supported_vector(int opcode, int vlen, BasicType bt);
+ static bool match_rule_supported_vector(int opcode, int vlen, BasicType bt);
- static const bool match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt);
+ static bool match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt);
- static const bool vector_needs_partial_operations(Node* node, const TypeVect* vt);
+ static bool vector_needs_partial_operations(Node* node, const TypeVect* vt);
 static const RegMask* predicate_reg_mask(void);
 static const TypeVectMask* predicate_reg_type(const Type* elemTy, int length);
 // Vector width in bytes
- static const int vector_width_in_bytes(BasicType bt);
+ static int vector_width_in_bytes(BasicType bt);
 // Limits on vector size (number of elements).
- static const int max_vector_size(const BasicType bt);
- static const int min_vector_size(const BasicType bt);
- static const bool vector_size_supported(const BasicType bt, int size) {
+ static int max_vector_size(const BasicType bt);
+ static int min_vector_size(const BasicType bt);
+ static bool vector_size_supported(const BasicType bt, int size) {
 return (Matcher::max_vector_size(bt) >= size && Matcher::min_vector_size(bt) <= size);
 }
 // Limits on max vector size (number of elements) for auto-vectorization.
- static const int superword_max_vector_size(const BasicType bt);
+ static int superword_max_vector_size(const BasicType bt);
 // Actual max scalable vector register length.
- static const int scalable_vector_reg_size(const BasicType bt);
+ static int scalable_vector_reg_size(const BasicType bt);
 // Actual max scalable predicate register length.
- static const int scalable_predicate_reg_slots();
+ static int scalable_predicate_reg_slots();
 // Vector ideal reg
- static const uint vector_ideal_reg(int len);
+ static uint vector_ideal_reg(int len);
 // Vector length
 static uint vector_length(const Node* n);
@@ -449,7 +449,7 @@ public:
 static RegMask c_frame_ptr_mask;
 // Java-Native vector calling convention
- static const bool supports_vector_calling_convention();
+ static bool supports_vector_calling_convention();
 static OptoRegPair vector_return_value(uint ideal_reg);
 // Is this branch offset small enough to be addressed by a short branch?
diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp
index ca03ecd070a..73490e7be10 100644
--- a/src/hotspot/share/opto/node.hpp
+++ b/src/hotspot/share/opto/node.hpp
@@ -829,9 +829,9 @@ protected:
 }
 public:
- const juint class_id() const { return _class_id; }
+ juint class_id() const { return _class_id; }
- const juint flags() const { return _flags; }
+ juint flags() const { return _flags; }
 void add_flag(juint fl) { init_flags(fl); }
diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp
index 6cb74c67521..c37c0366394 100644
--- a/src/hotspot/share/opto/type.hpp
+++ b/src/hotspot/share/opto/type.hpp
@@ -999,8 +999,8 @@ public:
 const int _offset; // Offset into oop, with TOP & BOT
 const PTR _ptr; // Pointer equivalence class
- const int offset() const { return _offset; }
- const PTR ptr() const { return _ptr; }
+ int offset() const { return _offset; }
+ PTR ptr() const { return _ptr; }
 static const TypePtr *make(TYPES t, PTR ptr, int offset,
 const TypePtr* speculative = nullptr,
diff --git a/src/hotspot/share/prims/jvmtiEnter.xsl b/src/hotspot/share/prims/jvmtiEnter.xsl
index 1c8accfe51f..5b1232d1eb3 100644
--- a/src/hotspot/share/prims/jvmtiEnter.xsl
+++ b/src/hotspot/share/prims/jvmtiEnter.xsl
@@ -174,7 +174,7 @@ jint
 // Check Event Capabilities
-const bool JvmtiUtil::has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr) {
+bool JvmtiUtil::has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr) {
 switch (event_type) {
diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp
index 6908e1994ba..d98a725c7aa 100644
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp
@@ -1405,16 +1405,16 @@ class AdvancedHeapWalkContext: public HeapWalkContext {
 jint heap_filter() const { return _heap_filter; }
 Klass* klass_filter() const { return _klass_filter; }
- const jvmtiHeapReferenceCallback heap_reference_callback() const {
+ jvmtiHeapReferenceCallback heap_reference_callback() const {
 return _heap_callbacks->heap_reference_callback;
 };
- const jvmtiPrimitiveFieldCallback primitive_field_callback() const {
+ jvmtiPrimitiveFieldCallback primitive_field_callback() const {
 return _heap_callbacks->primitive_field_callback;
 }
- const jvmtiArrayPrimitiveValueCallback array_primitive_value_callback() const {
+ jvmtiArrayPrimitiveValueCallback array_primitive_value_callback() const {
 return _heap_callbacks->array_primitive_value_callback;
 }
- const jvmtiStringPrimitiveValueCallback string_primitive_value_callback() const {
+ jvmtiStringPrimitiveValueCallback string_primitive_value_callback() const {
 return _heap_callbacks->string_primitive_value_callback;
 }
 };
diff --git a/src/hotspot/share/prims/jvmtiUtil.hpp b/src/hotspot/share/prims/jvmtiUtil.hpp
index b0d8e5bebaf..aa3d26bf1ea 100644
--- a/src/hotspot/share/prims/jvmtiUtil.hpp
+++ b/src/hotspot/share/prims/jvmtiUtil.hpp
@@ -50,9 +50,9 @@ public:
 static const char* error_name(int num) { return _error_names[num]; } // To Do: add range checking
- static const bool has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr);
+ static bool has_event_capability(jvmtiEvent event_type, const jvmtiCapabilities* capabilities_ptr);
- static const bool event_threaded(int num) {
+ static bool event_threaded(int num) {
 if (num >= JVMTI_MIN_EVENT_TYPE_VAL && num <= JVMTI_MAX_EVENT_TYPE_VAL) {
 return _event_threaded[num];
 }
diff --git a/src/hotspot/share/runtime/threadSMR.hpp b/src/hotspot/share/runtime/threadSMR.hpp
index e6b1a52e1a9..a379e820587 100644
--- a/src/hotspot/share/runtime/threadSMR.hpp
+++ b/src/hotspot/share/runtime/threadSMR.hpp
@@ -197,11 +197,11 @@ public:
 template <class T> void threads_do(T *cl) const;
- uint length() const { return _length; }
+ uint length() const { return _length; }
- JavaThread *const thread_at(uint i) const { return _threads[i]; }
+ JavaThread *thread_at(uint i) const { return _threads[i]; }
- JavaThread *const *threads() const { return _threads; }
+ JavaThread *const *threads() const { return _threads; }
 // Returns -1 if target is not found.
 int find_index_of_JavaThread(JavaThread* target);
diff --git a/src/hotspot/share/services/memoryService.hpp b/src/hotspot/share/services/memoryService.hpp
index a00e49dd0a7..2d28f25c695 100644
--- a/src/hotspot/share/services/memoryService.hpp
+++ b/src/hotspot/share/services/memoryService.hpp
@@ -67,10 +67,10 @@ public:
 static MemoryPool* get_memory_pool(instanceHandle pool);
 static MemoryManager* get_memory_manager(instanceHandle mgr);
- static const int num_memory_pools() {
+ static int num_memory_pools() {
 return _pools_list->length();
 }
- static const int num_memory_managers() {
+ static int num_memory_managers() {
 return _managers_list->length();
 }
diff --git a/src/hotspot/share/utilities/bitMap.hpp b/src/hotspot/share/utilities/bitMap.hpp
index db821f2174e..4fa6fb5fda2 100644
--- a/src/hotspot/share/utilities/bitMap.hpp
+++ b/src/hotspot/share/utilities/bitMap.hpp
@@ -157,7 +157,7 @@ class BitMap {
 void set_word (idx_t word) { set_word(word, ~(bm_word_t)0); }
 void clear_word(idx_t word) { _map[word] = 0; }
- static inline const bm_word_t load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order);
+ static inline bm_word_t load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order);
 // Utilities for ranges of bits. Ranges are half-open [beg, end).
diff --git a/src/hotspot/share/utilities/bitMap.inline.hpp b/src/hotspot/share/utilities/bitMap.inline.hpp
index d03b25d0cc4..20345425638 100644
--- a/src/hotspot/share/utilities/bitMap.inline.hpp
+++ b/src/hotspot/share/utilities/bitMap.inline.hpp
@@ -42,7 +42,7 @@ inline void BitMap::clear_bit(idx_t bit) {
 *word_addr(bit) &= ~bit_mask(bit);
 }
-inline const BitMap::bm_word_t BitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) {
+inline BitMap::bm_word_t BitMap::load_word_ordered(const volatile bm_word_t* const addr, atomic_memory_order memory_order) {
 if (memory_order == memory_order_relaxed || memory_order == memory_order_release) {
 return Atomic::load(addr);
 } else {
diff --git a/src/hotspot/share/utilities/elfFile.hpp b/src/hotspot/share/utilities/elfFile.hpp
index 579837ebf80..1b572fca400 100644
--- a/src/hotspot/share/utilities/elfFile.hpp
+++ b/src/hotspot/share/utilities/elfFile.hpp
@@ -308,7 +308,7 @@ class ElfFile: public CHeapObj<mtInternal> {
 static uint gnu_debuglink_crc32(uint32_t crc, uint8_t* buf, size_t len);
 protected:
- FILE* const fd() const { return _file; }
+ FILE* fd() const { return _file; }
 // Read the section header of section 'name'.
 bool read_section_header(const char* name, Elf_Shdr& hdr) const;